Author: szetszwo
Date: Wed Apr 17 02:41:38 2013
New Revision: 1468725

URL: http://svn.apache.org/r1468725
Log:
HDFS-4550. Refactor INodeDirectory.INodesInPath to a standalone class.

Added:
    
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
Modified:
    
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
    
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
    
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
    
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
    
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
    
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
    
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
    
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
    
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java
    
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotReplication.java

Modified: 
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt?rev=1468725&r1=1468724&r2=1468725&view=diff
==============================================================================
--- 
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
 (original)
+++ 
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
 Wed Apr 17 02:41:38 2013
@@ -247,3 +247,6 @@ Branch-2802 Snapshot (Unreleased)
 
   HDFS-4529. Disallow concat when one of the src files is in some snapshot.
   (szetszwo)
+
+  HDFS-4550. Refactor INodeDirectory.INodesInPath to a standalone class.
+  (szetszwo)

Modified: 
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=1468725&r1=1468724&r2=1468725&view=diff
==============================================================================
--- 
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
 (original)
+++ 
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
 Wed Apr 17 02:41:38 2013
@@ -61,7 +61,6 @@ import org.apache.hadoop.hdfs.server.blo
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
-import org.apache.hadoop.hdfs.server.namenode.INodeDirectory.INodesInPath;
 import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount;
 import 
org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
 import 
org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
@@ -1536,7 +1535,7 @@ public class FSDirectory implements Clos
 
   INodesInPath getExistingPathINodes(byte[][] components)
       throws UnresolvedLinkException {
-    return rootDir.getExistingPathINodes(components, components.length, false);
+    return INodesInPath.resolve(rootDir, components);
   }
 
   /**

Modified: 
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java?rev=1468725&r1=1468724&r2=1468725&view=diff
==============================================================================
--- 
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
 (original)
+++ 
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
 Wed Apr 17 02:41:38 2013
@@ -64,7 +64,6 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateBlocksOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateMasterKeyOp;
 import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
-import org.apache.hadoop.hdfs.server.namenode.INodeDirectory.INodesInPath;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease;
 import org.apache.hadoop.hdfs.util.Holder;
 

Modified: 
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java?rev=1468725&r1=1468724&r2=1468725&view=diff
==============================================================================
--- 
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
 (original)
+++ 
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
 Wed Apr 17 02:41:38 2013
@@ -51,7 +51,6 @@ import org.apache.hadoop.hdfs.protocol.L
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
-import org.apache.hadoop.hdfs.server.namenode.INodeDirectory.INodesInPath;
 import 
org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot.FileDiffList;
 import 
org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
 import 
org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;

Modified: 
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1468725&r1=1468724&r2=1468725&view=diff
==============================================================================
--- 
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 (original)
+++ 
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 Wed Apr 17 02:41:38 2013
@@ -171,7 +171,6 @@ import org.apache.hadoop.hdfs.server.com
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.common.Util;
 import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
-import org.apache.hadoop.hdfs.server.namenode.INodeDirectory.INodesInPath;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease;
 import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
 import org.apache.hadoop.hdfs.server.namenode.ha.EditLogTailer;

Modified: 
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java?rev=1468725&r1=1468724&r2=1468725&view=diff
==============================================================================
--- 
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
 (original)
+++ 
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
 Wed Apr 17 02:41:38 2013
@@ -29,7 +29,6 @@ import org.apache.commons.logging.LogFac
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.server.namenode.INodeDirectory.INodesInPath;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;

Modified: 
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java?rev=1468725&r1=1468724&r2=1468725&view=diff
==============================================================================
--- 
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
 (original)
+++ 
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
 Wed Apr 17 02:41:38 2013
@@ -600,24 +600,6 @@ public abstract class INode implements D
     return StringUtils.split(path, Path.SEPARATOR_CHAR);
   }
 
-  /**
-   * Given some components, create a path name.
-   * @param components The path components
-   * @param start index
-   * @param end index
-   * @return concatenated path
-   */
-  static String constructPath(byte[][] components, int start, int end) {
-    StringBuilder buf = new StringBuilder();
-    for (int i = start; i < end; i++) {
-      buf.append(DFSUtil.bytes2String(components[i]));
-      if (i < end - 1) {
-        buf.append(Path.SEPARATOR);
-      }
-    }
-    return buf.toString();
-  }
-
   @Override
   public final int compareTo(byte[] bytes) {
     final byte[] name = getLocalNameBytes();

Modified: 
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java?rev=1468725&r1=1468724&r2=1468725&view=diff
==============================================================================
--- 
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
 (original)
+++ 
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
 Wed Apr 17 02:41:38 2013
@@ -20,7 +20,6 @@ package org.apache.hadoop.hdfs.server.na
 import java.io.FileNotFoundException;
 import java.io.PrintWriter;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
@@ -29,9 +28,7 @@ import org.apache.hadoop.fs.PathIsNotDir
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
-import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
 import org.apache.hadoop.hdfs.server.namenode.Content.CountsMap.Key;
 import 
org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
 import 
org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
@@ -309,14 +306,14 @@ public class INodeDirectory extends INod
   /** @return the {@link INodesInPath} containing only the last inode. */
   INodesInPath getLastINodeInPath(String path, boolean resolveLink
       ) throws UnresolvedLinkException {
-    return getExistingPathINodes(getPathComponents(path), 1, resolveLink);
+    return INodesInPath.resolve(this, getPathComponents(path), 1, resolveLink);
   }
 
   /** @return the {@link INodesInPath} containing all inodes in the path. */
   INodesInPath getINodesInPath(String path, boolean resolveLink
       ) throws UnresolvedLinkException {
     final byte[][] components = getPathComponents(path);
-    return getExistingPathINodes(components, components.length, resolveLink);
+    return INodesInPath.resolve(this, components, components.length, 
resolveLink);
   }
 
   /** @return the last inode in the path. */
@@ -344,7 +341,7 @@ public class INodeDirectory extends INod
   INodesInPath getINodesInPath4Write(String src, boolean resolveLink)
       throws UnresolvedLinkException, SnapshotAccessControlException {
     final byte[][] components = INode.getPathComponents(src);
-    INodesInPath inodesInPath = getExistingPathINodes(components,
+    INodesInPath inodesInPath = INodesInPath.resolve(this, components,
         components.length, resolveLink);
     if (inodesInPath.isSnapshot()) {
       throw new SnapshotAccessControlException(
@@ -354,170 +351,6 @@ public class INodeDirectory extends INod
   }
 
   /**
-   * Retrieve existing INodes from a path. If existing is big enough to store
-   * all path components (existing and non-existing), then existing INodes
-   * will be stored starting from the root INode into existing[0]; if
-   * existing is not big enough to store all path components, then only the
-   * last existing and non existing INodes will be stored so that
-   * existing[existing.length-1] refers to the INode of the final component.
-   * 
-   * An UnresolvedPathException is always thrown when an intermediate path 
-   * component refers to a symbolic link. If the final path component refers 
-   * to a symbolic link then an UnresolvedPathException is only thrown if
-   * resolveLink is true.  
-   * 
-   * <p>
-   * Example: <br>
-   * Given the path /c1/c2/c3 where only /c1/c2 exists, resulting in the
-   * following path components: ["","c1","c2","c3"],
-   * 
-   * <p>
-   * <code>getExistingPathINodes(["","c1","c2"], [?])</code> should fill the
-   * array with [c2] <br>
-   * <code>getExistingPathINodes(["","c1","c2","c3"], [?])</code> should fill 
the
-   * array with [null]
-   * 
-   * <p>
-   * <code>getExistingPathINodes(["","c1","c2"], [?,?])</code> should fill the
-   * array with [c1,c2] <br>
-   * <code>getExistingPathINodes(["","c1","c2","c3"], [?,?])</code> should fill
-   * the array with [c2,null]
-   * 
-   * <p>
-   * <code>getExistingPathINodes(["","c1","c2"], [?,?,?,?])</code> should fill
-   * the array with [rootINode,c1,c2,null], <br>
-   * <code>getExistingPathINodes(["","c1","c2","c3"], [?,?,?,?])</code> should
-   * fill the array with [rootINode,c1,c2,null]
-   * 
-   * @param components array of path component name
-   * @param numOfINodes number of INodes to return
-   * @param resolveLink indicates whether UnresolvedLinkException should
-   *        be thrown when the path refers to a symbolic link.
-   * @return the specified number of existing INodes in the path
-   */
-  INodesInPath getExistingPathINodes(byte[][] components, int numOfINodes, 
-      boolean resolveLink) throws UnresolvedLinkException {
-    assert this.compareTo(components[0]) == 0 :
-        "Incorrect name " + getLocalName() + " expected "
-        + (components[0] == null? null: DFSUtil.bytes2String(components[0]));
-
-    INodesInPath existing = new INodesInPath(components, numOfINodes);
-    INode curNode = this;
-    int count = 0;
-    int index = numOfINodes - components.length;
-    if (index > 0) {
-      index = 0;
-    }
-    while (count < components.length && curNode != null) {
-      final boolean lastComp = (count == components.length - 1);      
-      if (index >= 0) {
-        existing.addNode(curNode);
-      }
-      final boolean isRef = curNode.isReference();
-      final boolean isDir = curNode.isDirectory();
-      final INodeDirectory dir = isDir? curNode.asDirectory(): null;  
-      if (!isRef && isDir && dir instanceof INodeDirectoryWithSnapshot) {
-        //if the path is a non-snapshot path, update the latest snapshot.
-        if (!existing.isSnapshot()) {
-          existing.updateLatestSnapshot(
-              ((INodeDirectoryWithSnapshot)dir).getLastSnapshot());
-        }
-      } else if (isRef && isDir && !lastComp) {
-        // If the curNode is a reference node, need to check its dstSnapshot:
-        // 1. if the existing snapshot is no later than the dstSnapshot (which
-        // is the latest snapshot in dst before the rename), the changes 
-        // should be recorded in previous snapshots (belonging to src).
-        // 2. however, if the ref node is already the last component, we still 
-        // need to know the latest snapshot among the ref node's ancestors, 
-        // in case of processing a deletion operation. Thus we do not overwrite
-        // the latest snapshot if lastComp is true. In case of the operation is
-        // a modification operation, we do a similar check in corresponding 
-        // recordModification method.
-        if (!existing.isSnapshot()) {
-          int dstSnapshotId = curNode.asReference().getDstSnapshotId();
-          Snapshot latest = existing.getLatestSnapshot();
-          if (latest == null ||  // no snapshot in dst tree of rename
-              dstSnapshotId >= latest.getId()) { // the above scenario 
-            Snapshot lastSnapshot = null;
-            if (curNode.isDirectory()
-                && curNode.asDirectory() instanceof 
INodeDirectoryWithSnapshot) {
-              lastSnapshot = ((INodeDirectoryWithSnapshot) curNode
-                  .asDirectory()).getLastSnapshot();
-            } else if (curNode.isFile()
-                && curNode.asFile() instanceof INodeFileWithSnapshot) {
-              lastSnapshot = ((INodeFileWithSnapshot) curNode
-                  .asFile()).getDiffs().getLastSnapshot();
-            }
-            existing.setSnapshot(lastSnapshot);
-          }
-        }
-      }
-      if (curNode.isSymlink() && (!lastComp || (lastComp && resolveLink))) {
-        final String path = constructPath(components, 0, components.length);
-        final String preceding = constructPath(components, 0, count);
-        final String remainder =
-          constructPath(components, count + 1, components.length);
-        final String link = DFSUtil.bytes2String(components[count]);
-        final String target = curNode.asSymlink().getSymlinkString();
-        if (NameNode.stateChangeLog.isDebugEnabled()) {
-          NameNode.stateChangeLog.debug("UnresolvedPathException " +
-            " path: " + path + " preceding: " + preceding +
-            " count: " + count + " link: " + link + " target: " + target +
-            " remainder: " + remainder);
-        }
-        throw new UnresolvedPathException(path, preceding, remainder, target);
-      }
-      if (lastComp || !isDir) {
-        break;
-      }
-      final byte[] childName = components[count + 1];
-      
-      // check if the next byte[] in components is for ".snapshot"
-      if (isDotSnapshotDir(childName)
-          && isDir && dir instanceof INodeDirectoryWithSnapshot) {
-        // skip the ".snapshot" in components
-        count++;
-        index++;
-        existing.isSnapshot = true;
-        if (index >= 0) { // decrease the capacity by 1 to account for 
.snapshot
-          existing.capacity--;
-        }
-        // check if ".snapshot" is the last element of components
-        if (count == components.length - 1) {
-          break;
-        }
-        // Resolve snapshot root
-        final Snapshot s = ((INodeDirectorySnapshottable)dir).getSnapshot(
-            components[count + 1]);
-        if (s == null) {
-          //snapshot not found
-          curNode = null;
-        } else {
-          curNode = s.getRoot();
-          existing.setSnapshot(s);
-        }
-        if (index >= -1) {
-          existing.snapshotRootIndex = existing.numNonNull;
-        }
-      } else {
-        // normal case, and also for resolving file/dir under snapshot root
-        curNode = dir.getChild(childName, existing.getPathSnapshot());
-      }
-      count++;
-      index++;
-    }
-    return existing;
-  }
-
-  /**
-   * @return true if path component is {@link HdfsConstants#DOT_SNAPSHOT_DIR}
-   */
-  private static boolean isDotSnapshotDir(byte[] pathComponent) {
-    return pathComponent == null ? false
-        : Arrays.equals(HdfsConstants.DOT_SNAPSHOT_DIR_BYTES, pathComponent);
-  }
-
-  /**
    * Given a child's name, return the index of the next child
    *
    * @param name a child's name
@@ -714,207 +547,6 @@ public class INodeDirectory extends INod
         && getFsPermission().equals(other.getFsPermission());
   }
   
-  /**
-   * Used by
-   * {@link INodeDirectory#getExistingPathINodes(byte[][], int, boolean)}.
-   * Contains INodes information resolved from a given path.
-   */
-  public static class INodesInPath {
-    private final byte[][] path;
-    /**
-     * Array with the specified number of INodes resolved for a given path.
-     */
-    private INode[] inodes;
-    /**
-     * Indicate the number of non-null elements in {@link #inodes}
-     */
-    private int numNonNull;
-    /**
-     * The path for a snapshot file/dir contains the .snapshot thus makes the
-     * length of the path components larger the number of inodes. We use
-     * the capacity to control this special case.
-     */
-    private int capacity;
-    /**
-     * true if this path corresponds to a snapshot
-     */
-    private boolean isSnapshot;
-    /**
-     * Index of {@link INodeDirectoryWithSnapshot} for snapshot path, else -1
-     */
-    private int snapshotRootIndex;
-    /**
-     * For snapshot paths, it is the reference to the snapshot; or null if the
-     * snapshot does not exist. For non-snapshot paths, it is the reference to
-     * the latest snapshot found in the path; or null if no snapshot is found.
-     */
-    private Snapshot snapshot = null; 
-
-    private INodesInPath(byte[][] path, int number) {
-      this.path = path;
-      assert (number >= 0);
-      inodes = new INode[number];
-      capacity = number;
-      numNonNull = 0;
-      isSnapshot = false;
-      snapshotRootIndex = -1;
-    }
-
-    /**
-     * For non-snapshot paths, return the latest snapshot found in the path.
-     * For snapshot paths, return null.
-     */
-    public Snapshot getLatestSnapshot() {
-      return isSnapshot? null: snapshot;
-    }
-    
-    /**
-     * For snapshot paths, return the snapshot specified in the path.
-     * For non-snapshot paths, return null.
-     */
-    public Snapshot getPathSnapshot() {
-      return isSnapshot? snapshot: null;
-    }
-
-    private void setSnapshot(Snapshot s) {
-      snapshot = s;
-    }
-    
-    private void updateLatestSnapshot(Snapshot s) {
-      if (snapshot == null
-          || (s != null && Snapshot.ID_COMPARATOR.compare(snapshot, s) < 0)) {
-        snapshot = s;
-      }
-    }
-
-    /**
-     * @return the whole inodes array including the null elements.
-     */
-    INode[] getINodes() {
-      if (capacity < inodes.length) {
-        INode[] newNodes = new INode[capacity];
-        System.arraycopy(inodes, 0, newNodes, 0, capacity);
-        inodes = newNodes;
-      }
-      return inodes;
-    }
-    
-    /**
-     * @return the i-th inode if i >= 0;
-     *         otherwise, i < 0, return the (length + i)-th inode.
-     */
-    public INode getINode(int i) {
-      return inodes[i >= 0? i: inodes.length + i];
-    }
-    
-    /** @return the last inode. */
-    public INode getLastINode() {
-      return inodes[inodes.length - 1];
-    }
-
-    byte[] getLastLocalName() {
-      return path[path.length - 1];
-    }
-    
-    /**
-     * @return index of the {@link INodeDirectoryWithSnapshot} in
-     *         {@link #inodes} for snapshot path, else -1.
-     */
-    int getSnapshotRootIndex() {
-      return this.snapshotRootIndex;
-    }
-    
-    /**
-     * @return isSnapshot true for a snapshot path
-     */
-    boolean isSnapshot() {
-      return this.isSnapshot;
-    }
-    
-    /**
-     * Add an INode at the end of the array
-     */
-    private void addNode(INode node) {
-      inodes[numNonNull++] = node;
-    }
-    
-    void setINode(int i, INode inode) {
-      inodes[i >= 0? i: inodes.length + i] = inode;
-    }
-    
-    void setLastINode(INode last) {
-      inodes[inodes.length - 1] = last;
-    }
-    
-    /**
-     * @return The number of non-null elements
-     */
-    int getNumNonNull() {
-      return numNonNull;
-    }
-    
-    static String toString(INode inode) {
-      return inode == null? null: inode.getLocalName();
-    }
-
-    @Override
-    public String toString() {
-      return toString(true);
-    }
-
-    private String toString(boolean vaildateObject) {
-      if (vaildateObject) {
-        vaildate();
-      }
-
-      final StringBuilder b = new StringBuilder(getClass().getSimpleName())
-          .append(": path = ").append(DFSUtil.byteArray2PathString(path))
-          .append("\n  inodes = ");
-      if (inodes == null) {
-        b.append("null");
-      } else if (inodes.length == 0) {
-        b.append("[]");
-      } else {
-        b.append("[").append(toString(inodes[0]));
-        for(int i = 1; i < inodes.length; i++) {
-          b.append(", ").append(toString(inodes[i]));
-        }
-        b.append("], length=").append(inodes.length);
-      }
-      b.append("\n  numNonNull = ").append(numNonNull)
-       .append("\n  capacity   = ").append(capacity)
-       .append("\n  isSnapshot        = ").append(isSnapshot)
-       .append("\n  snapshotRootIndex = ").append(snapshotRootIndex)
-       .append("\n  snapshot          = ").append(snapshot);
-      return b.toString();
-    }
-
-    void vaildate() {
-      // check parent up to snapshotRootIndex or numNonNull
-      final int n = snapshotRootIndex >= 0? snapshotRootIndex + 1: numNonNull; 
 
-      int i = 0;
-      if (inodes[i] != null) {
-        for(i++; i < n && inodes[i] != null; i++) {
-          final INodeDirectory parent_i = inodes[i].getParent();
-          final INodeDirectory parent_i_1 = inodes[i-1].getParent();
-          if (parent_i != inodes[i-1] &&
-              (parent_i_1 == null || !parent_i_1.isSnapshottable()
-                  || parent_i != parent_i_1)) {
-            throw new AssertionError(
-                "inodes[" + i + "].getParent() != inodes[" + (i-1)
-                + "]\n  inodes[" + i + "]=" + inodes[i].toDetailString()
-                + "\n  inodes[" + (i-1) + "]=" + inodes[i-1].toDetailString()
-                + "\n this=" + toString(false));
-          }
-        }
-      }
-      if (i != n) {
-        throw new AssertionError("i = " + i + " != " + n
-            + ", this=" + toString(false));
-      }
-    }
-  }
-
   /*
    * The following code is to dump the tree recursively for testing.
    * 

Added: 
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java?rev=1468725&view=auto
==============================================================================
--- 
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
 (added)
+++ 
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
 Wed Apr 17 02:41:38 2013
@@ -0,0 +1,422 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.util.Arrays;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.UnresolvedLinkException;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
+import 
org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
+import 
org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileWithSnapshot;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * Contains INodes information resolved from a given path.
+ */
+public class INodesInPath {
+  public static final Log LOG = LogFactory.getLog(INodesInPath.class);
+
+  /**
+   * @return true if path component is {@link HdfsConstants#DOT_SNAPSHOT_DIR}
+   */
+  private static boolean isDotSnapshotDir(byte[] pathComponent) {
+    return pathComponent == null ? false
+        : Arrays.equals(HdfsConstants.DOT_SNAPSHOT_DIR_BYTES, pathComponent);
+  }
+
+  /**
+   * Given some components, create a path name.
+   * @param components The path components
+   * @param start index
+   * @param end index
+   * @return concatenated path
+   */
+  private static String constructPath(byte[][] components, int start, int end) 
{
+    StringBuilder buf = new StringBuilder();
+    for (int i = start; i < end; i++) {
+      buf.append(DFSUtil.bytes2String(components[i]));
+      if (i < end - 1) {
+        buf.append(Path.SEPARATOR);
+      }
+    }
+    return buf.toString();
+  }
+
  /**
   * Resolve every component of the path, without resolving a trailing
   * symlink; convenience overload of
   * {@link #resolve(INodeDirectory, byte[][], int, boolean)}.
   */
  static INodesInPath resolve(final INodeDirectory startingDir,
      final byte[][] components) throws UnresolvedLinkException {
    return resolve(startingDir, components, components.length, false);
  }
+
+  /**
+   * Retrieve existing INodes from a path. If existing is big enough to store
+   * all path components (existing and non-existing), then existing INodes
+   * will be stored starting from the root INode into existing[0]; if
+   * existing is not big enough to store all path components, then only the
+   * last existing and non existing INodes will be stored so that
+   * existing[existing.length-1] refers to the INode of the final component.
+   * 
+   * An UnresolvedPathException is always thrown when an intermediate path 
+   * component refers to a symbolic link. If the final path component refers 
+   * to a symbolic link then an UnresolvedPathException is only thrown if
+   * resolveLink is true.  
+   * 
+   * <p>
+   * Example: <br>
+   * Given the path /c1/c2/c3 where only /c1/c2 exists, resulting in the
+   * following path components: ["","c1","c2","c3"],
+   * 
+   * <p>
+   * <code>getExistingPathINodes(["","c1","c2"], [?])</code> should fill the
+   * array with [c2] <br>
+   * <code>getExistingPathINodes(["","c1","c2","c3"], [?])</code> should fill 
the
+   * array with [null]
+   * 
+   * <p>
+   * <code>getExistingPathINodes(["","c1","c2"], [?,?])</code> should fill the
+   * array with [c1,c2] <br>
+   * <code>getExistingPathINodes(["","c1","c2","c3"], [?,?])</code> should fill
+   * the array with [c2,null]
+   * 
+   * <p>
+   * <code>getExistingPathINodes(["","c1","c2"], [?,?,?,?])</code> should fill
+   * the array with [rootINode,c1,c2,null], <br>
+   * <code>getExistingPathINodes(["","c1","c2","c3"], [?,?,?,?])</code> should
+   * fill the array with [rootINode,c1,c2,null]
+   * 
+   * @param startingDir the starting directory
+   * @param components array of path component name
+   * @param numOfINodes number of INodes to return
+   * @param resolveLink indicates whether UnresolvedLinkException should
+   *        be thrown when the path refers to a symbolic link.
+   * @return the specified number of existing INodes in the path
+   */
+  static INodesInPath resolve(final INodeDirectory startingDir,
+      final byte[][] components, final int numOfINodes, 
+      final boolean resolveLink) throws UnresolvedLinkException {
+    Preconditions.checkArgument(startingDir.compareTo(components[0]) == 0);
+
+    INode curNode = startingDir;
+    final INodesInPath existing = new INodesInPath(components, numOfINodes);
+    int count = 0;
+    int index = numOfINodes - components.length;
+    if (index > 0) {
+      index = 0;
+    }
+    while (count < components.length && curNode != null) {
+      final boolean lastComp = (count == components.length - 1);      
+      if (index >= 0) {
+        existing.addNode(curNode);
+      }
+      final boolean isRef = curNode.isReference();
+      final boolean isDir = curNode.isDirectory();
+      final INodeDirectory dir = isDir? curNode.asDirectory(): null;  
+      if (!isRef && isDir && dir instanceof INodeDirectoryWithSnapshot) {
+        //if the path is a non-snapshot path, update the latest snapshot.
+        if (!existing.isSnapshot()) {
+          existing.updateLatestSnapshot(
+              ((INodeDirectoryWithSnapshot)dir).getLastSnapshot());
+        }
+      } else if (isRef && isDir && !lastComp) {
+        // If the curNode is a reference node, need to check its dstSnapshot:
+        // 1. if the existing snapshot is no later than the dstSnapshot (which
+        // is the latest snapshot in dst before the rename), the changes 
+        // should be recorded in previous snapshots (belonging to src).
+        // 2. however, if the ref node is already the last component, we still 
+        // need to know the latest snapshot among the ref node's ancestors, 
+        // in case of processing a deletion operation. Thus we do not overwrite
+        // the latest snapshot if lastComp is true. In case of the operation is
+        // a modification operation, we do a similar check in corresponding 
+        // recordModification method.
+        if (!existing.isSnapshot()) {
+          int dstSnapshotId = curNode.asReference().getDstSnapshotId();
+          Snapshot latest = existing.getLatestSnapshot();
+          if (latest == null ||  // no snapshot in dst tree of rename
+              dstSnapshotId >= latest.getId()) { // the above scenario 
+            Snapshot lastSnapshot = null;
+            if (curNode.isDirectory()
+                && curNode.asDirectory() instanceof 
INodeDirectoryWithSnapshot) {
+              lastSnapshot = ((INodeDirectoryWithSnapshot) curNode
+                  .asDirectory()).getLastSnapshot();
+            } else if (curNode.isFile()
+                && curNode.asFile() instanceof INodeFileWithSnapshot) {
+              lastSnapshot = ((INodeFileWithSnapshot) curNode
+                  .asFile()).getDiffs().getLastSnapshot();
+            }
+            existing.setSnapshot(lastSnapshot);
+          }
+        }
+      }
+      if (curNode.isSymlink() && (!lastComp || (lastComp && resolveLink))) {
+        final String path = constructPath(components, 0, components.length);
+        final String preceding = constructPath(components, 0, count);
+        final String remainder =
+          constructPath(components, count + 1, components.length);
+        final String link = DFSUtil.bytes2String(components[count]);
+        final String target = curNode.asSymlink().getSymlinkString();
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("UnresolvedPathException " +
+            " path: " + path + " preceding: " + preceding +
+            " count: " + count + " link: " + link + " target: " + target +
+            " remainder: " + remainder);
+        }
+        throw new UnresolvedPathException(path, preceding, remainder, target);
+      }
+      if (lastComp || !isDir) {
+        break;
+      }
+      final byte[] childName = components[count + 1];
+      
+      // check if the next byte[] in components is for ".snapshot"
+      if (isDotSnapshotDir(childName)
+          && isDir && dir instanceof INodeDirectoryWithSnapshot) {
+        // skip the ".snapshot" in components
+        count++;
+        index++;
+        existing.isSnapshot = true;
+        if (index >= 0) { // decrease the capacity by 1 to account for 
.snapshot
+          existing.capacity--;
+        }
+        // check if ".snapshot" is the last element of components
+        if (count == components.length - 1) {
+          break;
+        }
+        // Resolve snapshot root
+        final Snapshot s = ((INodeDirectorySnapshottable)dir).getSnapshot(
+            components[count + 1]);
+        if (s == null) {
+          //snapshot not found
+          curNode = null;
+        } else {
+          curNode = s.getRoot();
+          existing.setSnapshot(s);
+        }
+        if (index >= -1) {
+          existing.snapshotRootIndex = existing.numNonNull;
+        }
+      } else {
+        // normal case, and also for resolving file/dir under snapshot root
+        curNode = dir.getChild(childName, existing.getPathSnapshot());
+      }
+      count++;
+      index++;
+    }
+    return existing;
+  }
+
  /** The full list of path components being resolved. */
  private final byte[][] path;
  /**
   * Array with the specified number of INodes resolved for a given path.
   */
  private INode[] inodes;
  /**
   * Indicate the number of non-null elements in {@link #inodes}
   */
  private int numNonNull;
  /**
   * The path for a snapshot file/dir contains the .snapshot thus makes the
   * length of the path components larger the number of inodes. We use
   * the capacity to control this special case.
   */
  private int capacity;
  /**
   * true if this path corresponds to a snapshot
   */
  private boolean isSnapshot;
  /**
   * Index of {@link INodeDirectoryWithSnapshot} for snapshot path, else -1
   */
  private int snapshotRootIndex;
  /**
   * For snapshot paths, it is the reference to the snapshot; or null if the
   * snapshot does not exist. For non-snapshot paths, it is the reference to
   * the latest snapshot found in the path; or null if no snapshot is found.
   */
  private Snapshot snapshot = null;

  /**
   * @param path the path components being resolved
   * @param number the number of inode slots to allocate; must be >= 0
   */
  private INodesInPath(byte[][] path, int number) {
    this.path = path;
    assert (number >= 0);
    inodes = new INode[number];
    capacity = number;
    numNonNull = 0;
    isSnapshot = false;
    snapshotRootIndex = -1;
  }
+
+  /**
+   * For non-snapshot paths, return the latest snapshot found in the path.
+   * For snapshot paths, return null.
+   */
+  public Snapshot getLatestSnapshot() {
+    return isSnapshot? null: snapshot;
+  }
+  
+  /**
+   * For snapshot paths, return the snapshot specified in the path.
+   * For non-snapshot paths, return null.
+   */
+  public Snapshot getPathSnapshot() {
+    return isSnapshot? snapshot: null;
+  }
+
  /** Unconditionally record the snapshot reference. */
  private void setSnapshot(Snapshot s) {
    snapshot = s;
  }
+  
+  private void updateLatestSnapshot(Snapshot s) {
+    if (snapshot == null
+        || (s != null && Snapshot.ID_COMPARATOR.compare(snapshot, s) < 0)) {
+      snapshot = s;
+    }
+  }
+
+  /**
+   * @return the whole inodes array including the null elements.
+   */
+  INode[] getINodes() {
+    if (capacity < inodes.length) {
+      INode[] newNodes = new INode[capacity];
+      System.arraycopy(inodes, 0, newNodes, 0, capacity);
+      inodes = newNodes;
+    }
+    return inodes;
+  }
+  
+  /**
+   * @return the i-th inode if i >= 0;
+   *         otherwise, i < 0, return the (length + i)-th inode.
+   */
+  public INode getINode(int i) {
+    return inodes[i >= 0? i: inodes.length + i];
+  }
+  
+  /** @return the last inode. */
+  public INode getLastINode() {
+    return inodes[inodes.length - 1];
+  }
+
  /** @return the local name (last path component) of the resolved path. */
  byte[] getLastLocalName() {
    return path[path.length - 1];
  }
+  
  /**
   * @return index of the {@link INodeDirectoryWithSnapshot} in
   *         {@link #inodes} for snapshot path, else -1.
   */
  int getSnapshotRootIndex() {
    // Set by resolve() when a ".snapshot" component is encountered.
    return this.snapshotRootIndex;
  }
+  
  /**
   * @return isSnapshot true for a snapshot path
   */
  boolean isSnapshot() {
    // Set by resolve() when a ".snapshot" component is found in the path.
    return this.isSnapshot;
  }
+  
  /**
   * Add an INode at the end of the array; numNonNull tracks how many
   * entries have been added this way.
   */
  private void addNode(INode node) {
    inodes[numNonNull++] = node;
  }
+  
+  void setINode(int i, INode inode) {
+    inodes[i >= 0? i: inodes.length + i] = inode;
+  }
+  
+  void setLastINode(INode last) {
+    inodes[inodes.length - 1] = last;
+  }
+  
  /**
   * @return The number of non-null elements
   */
  int getNumNonNull() {
    // Incremented by addNode() for each resolved inode.
    return numNonNull;
  }
+  
+  private static String toString(INode inode) {
+    return inode == null? null: inode.getLocalName();
+  }
+
  /** Debug rendering; also runs the internal consistency check first. */
  @Override
  public String toString() {
    return toString(true);
  }
+
+  private String toString(boolean vaildateObject) {
+    if (vaildateObject) {
+      vaildate();
+    }
+
+    final StringBuilder b = new StringBuilder(getClass().getSimpleName())
+        .append(": path = ").append(DFSUtil.byteArray2PathString(path))
+        .append("\n  inodes = ");
+    if (inodes == null) {
+      b.append("null");
+    } else if (inodes.length == 0) {
+      b.append("[]");
+    } else {
+      b.append("[").append(toString(inodes[0]));
+      for(int i = 1; i < inodes.length; i++) {
+        b.append(", ").append(toString(inodes[i]));
+      }
+      b.append("], length=").append(inodes.length);
+    }
+    b.append("\n  numNonNull = ").append(numNonNull)
+     .append("\n  capacity   = ").append(capacity)
+     .append("\n  isSnapshot        = ").append(isSnapshot)
+     .append("\n  snapshotRootIndex = ").append(snapshotRootIndex)
+     .append("\n  snapshot          = ").append(snapshot);
+    return b.toString();
+  }
+
  /**
   * Check the parent-pointer invariant of the resolved inodes: for every
   * pair of consecutive non-null entries, inodes[i-1] must be the parent of
   * inodes[i], or (across a snapshot boundary) both must share the same
   * snapshottable parent. Throws AssertionError on violation.
   *
   * NOTE(review): the name is a long-standing typo of "validate"; kept
   * unchanged because the method is package-visible and may have callers
   * outside this file.
   */
  void vaildate() {
    // check parent up to snapshotRootIndex or numNonNull
    final int n = snapshotRootIndex >= 0? snapshotRootIndex + 1: numNonNull;
    int i = 0;
    if (inodes[i] != null) {
      for(i++; i < n && inodes[i] != null; i++) {
        final INodeDirectory parent_i = inodes[i].getParent();
        final INodeDirectory parent_i_1 = inodes[i-1].getParent();
        // Accept either a direct parent link, or a shared snapshottable
        // parent when crossing into a snapshot subtree.
        if (parent_i != inodes[i-1] &&
            (parent_i_1 == null || !parent_i_1.isSnapshottable()
                || parent_i != parent_i_1)) {
          throw new AssertionError(
              "inodes[" + i + "].getParent() != inodes[" + (i-1)
              + "]\n  inodes[" + i + "]=" + inodes[i].toDetailString()
              + "\n  inodes[" + (i-1) + "]=" + inodes[i-1].toDetailString()
              + "\n this=" + toString(false));
        }
      }
    }
    // A null entry before position n means resolution stopped early.
    if (i != n) {
      throw new AssertionError("i = " + i + " != " + n
          + ", this=" + toString(false));
    }
  }
+}
\ No newline at end of file

Modified: 
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java?rev=1468725&r1=1468724&r2=1468725&view=diff
==============================================================================
--- 
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
 (original)
+++ 
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
 Wed Apr 17 02:41:38 2013
@@ -34,7 +34,7 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
-import org.apache.hadoop.hdfs.server.namenode.INodeDirectory.INodesInPath;
+import org.apache.hadoop.hdfs.server.namenode.INodesInPath;
 import 
org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable.SnapshotDiffInfo;
 
 /**

Modified: 
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java?rev=1468725&r1=1468724&r2=1468725&view=diff
==============================================================================
--- 
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java
 (original)
+++ 
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java
 Wed Apr 17 02:41:38 2013
@@ -28,7 +28,6 @@ import org.apache.hadoop.hdfs.DFSTestUti
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.server.namenode.INodeDirectory.INodesInPath;
 import 
org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
 import 
org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileWithSnapshot;
@@ -139,8 +138,7 @@ public class TestSnapshotPathINodes {
     // Get the inodes by resolving the path of a normal file
     String[] names = INode.getPathNames(file1.toString());
     byte[][] components = INode.getPathComponents(names);
-    INodesInPath nodesInPath = fsdir.rootDir.getExistingPathINodes(components,
-        components.length, false);
+    INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
     INode[] inodes = nodesInPath.getINodes();
     // The number of inodes should be equal to components.length
     assertEquals(inodes.length, components.length);
@@ -159,7 +157,7 @@ public class TestSnapshotPathINodes {
     
     // Call getExistingPathINodes and request only one INode. This is used
     // when identifying the INode for a given path.
-    nodesInPath = fsdir.rootDir.getExistingPathINodes(components, 1, false);
+    nodesInPath = INodesInPath.resolve(fsdir.rootDir, components, 1, false);
     inodes = nodesInPath.getINodes();
     assertEquals(inodes.length, 1);
     assertSnapshot(nodesInPath, false, null, -1);
@@ -167,7 +165,7 @@ public class TestSnapshotPathINodes {
     
     // Call getExistingPathINodes and request 2 INodes. This is usually used
     // when identifying the parent INode of a given path.
-    nodesInPath = fsdir.rootDir.getExistingPathINodes(components, 2, false);
+    nodesInPath = INodesInPath.resolve(fsdir.rootDir, components, 2, false);
     inodes = nodesInPath.getINodes();
     assertEquals(inodes.length, 2);
     assertSnapshot(nodesInPath, false, null, -1);
@@ -190,8 +188,7 @@ public class TestSnapshotPathINodes {
     String snapshotPath = sub1.toString() + "/.snapshot/s1/file1";
     String[] names = INode.getPathNames(snapshotPath);
     byte[][] components = INode.getPathComponents(names);
-    INodesInPath nodesInPath = fsdir.rootDir.getExistingPathINodes(components,
-        components.length, false);
+    INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
     INode[] inodes = nodesInPath.getINodes();
     // Length of inodes should be (components.length - 1), since we will ignore
     // ".snapshot" 
@@ -206,7 +203,7 @@ public class TestSnapshotPathINodes {
         INodeDirectoryWithSnapshot);
     
     // Call getExistingPathINodes and request only one INode.
-    nodesInPath = fsdir.rootDir.getExistingPathINodes(components, 1, false);
+    nodesInPath = INodesInPath.resolve(fsdir.rootDir, components, 1, false);
     inodes = nodesInPath.getINodes();
     assertEquals(inodes.length, 1);
     // The snapshotroot (s1) is not included in inodes. Thus the
@@ -216,7 +213,7 @@ public class TestSnapshotPathINodes {
     assertINodeFile(nodesInPath.getLastINode(), file1);
     
     // Call getExistingPathINodes and request 2 INodes.
-    nodesInPath = fsdir.rootDir.getExistingPathINodes(components, 2, false);
+    nodesInPath = INodesInPath.resolve(fsdir.rootDir, components, 2, false);
     inodes = nodesInPath.getINodes();
     assertEquals(inodes.length, 2);
     // There should be two INodes in inodes: s1 and snapshot of file1. Thus the
@@ -228,8 +225,7 @@ public class TestSnapshotPathINodes {
     String dotSnapshotPath = sub1.toString() + "/.snapshot";
     names = INode.getPathNames(dotSnapshotPath);
     components = INode.getPathComponents(names);
-    nodesInPath = fsdir.rootDir.getExistingPathINodes(components,
-        components.length, false);
+    nodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
     inodes = nodesInPath.getINodes();
     // The number of INodes returned should be components.length - 1 since we
     // will ignore ".snapshot"
@@ -264,8 +260,7 @@ public class TestSnapshotPathINodes {
       String snapshotPath = sub1.toString() + "/.snapshot/s2/file1";
       String[] names = INode.getPathNames(snapshotPath);
       byte[][] components = INode.getPathComponents(names);
-      INodesInPath nodesInPath = 
fsdir.rootDir.getExistingPathINodes(components,
-          components.length, false);
+      INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir, 
components);
       INode[] inodes = nodesInPath.getINodes();
       // Length of inodes should be (components.length - 1), since we will 
ignore
       // ".snapshot" 
@@ -283,8 +278,7 @@ public class TestSnapshotPathINodes {
     // Check the INodes for path /TestSnapshot/sub1/file1
     String[] names = INode.getPathNames(file1.toString());
     byte[][] components = INode.getPathComponents(names);
-    INodesInPath nodesInPath = fsdir.rootDir.getExistingPathINodes(components,
-        components.length, false);
+    INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
     INode[] inodes = nodesInPath.getINodes();
     // The length of inodes should be equal to components.length
     assertEquals(inodes.length, components.length);
@@ -324,8 +318,7 @@ public class TestSnapshotPathINodes {
       String snapshotPath = sub1.toString() + "/.snapshot/s4/file3";
       String[] names = INode.getPathNames(snapshotPath);
       byte[][] components = INode.getPathComponents(names);
-      INodesInPath nodesInPath = 
fsdir.rootDir.getExistingPathINodes(components,
-          components.length, false);
+      INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir, 
components);
       INode[] inodes = nodesInPath.getINodes();
       // Length of inodes should be (components.length - 1), since we will 
ignore
       // ".snapshot" 
@@ -345,8 +338,7 @@ public class TestSnapshotPathINodes {
     // Check the inodes for /TestSnapshot/sub1/file3
     String[] names = INode.getPathNames(file3.toString());
     byte[][] components = INode.getPathComponents(names);
-    INodesInPath nodesInPath = fsdir.rootDir.getExistingPathINodes(components,
-        components.length, false);
+    INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
     INode[] inodes = nodesInPath.getINodes();
     // The number of inodes should be equal to components.length
     assertEquals(inodes.length, components.length);
@@ -375,8 +367,7 @@ public class TestSnapshotPathINodes {
     // First check the INode for /TestSnapshot/sub1/file1
     String[] names = INode.getPathNames(file1.toString());
     byte[][] components = INode.getPathComponents(names);
-    INodesInPath nodesInPath = fsdir.rootDir.getExistingPathINodes(components,
-        components.length, false);
+    INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
     INode[] inodes = nodesInPath.getINodes();
     // The number of inodes should be equal to components.length
     assertEquals(inodes.length, components.length);
@@ -398,8 +389,7 @@ public class TestSnapshotPathINodes {
     String snapshotPath = sub1.toString() + "/.snapshot/s3/file1";
     names = INode.getPathNames(snapshotPath);
     components = INode.getPathComponents(names);
-    INodesInPath ssNodesInPath = fsdir.rootDir.getExistingPathINodes(
-        components, components.length, false);
+    INodesInPath ssNodesInPath = INodesInPath.resolve(fsdir.rootDir, 
components);
     INode[] ssInodes = ssNodesInPath.getINodes();
     // Length of ssInodes should be (components.length - 1), since we will
     // ignore ".snapshot" 
@@ -418,8 +408,7 @@ public class TestSnapshotPathINodes {
     // Check the INode for /TestSnapshot/sub1/file1 again
     names = INode.getPathNames(file1.toString());
     components = INode.getPathComponents(names);
-    INodesInPath newNodesInPath = fsdir.rootDir
-        .getExistingPathINodes(components, components.length, false);
+    INodesInPath newNodesInPath = INodesInPath.resolve(fsdir.rootDir, 
components);
     assertSnapshot(newNodesInPath, false, s3, -1);
     INode[] newInodes = newNodesInPath.getINodes();
     // The number of inodes should be equal to components.length

Modified: 
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotReplication.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotReplication.java?rev=1468725&r1=1468724&r2=1468725&view=diff
==============================================================================
--- 
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotReplication.java
 (original)
+++ 
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotReplication.java
 Wed Apr 17 02:41:38 2013
@@ -32,7 +32,7 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
-import org.apache.hadoop.hdfs.server.namenode.INodeDirectory.INodesInPath;
+import org.apache.hadoop.hdfs.server.namenode.INodesInPath;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;


Reply via email to