[partial-ns] Implement DBChildrenView.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2d7972d4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2d7972d4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2d7972d4

Branch: refs/heads/feature-HDFS-8286
Commit: 2d7972d4473a5a7c3a4cf2fbca6e23b4a7d38ca6
Parents: c2cf9bc
Author: Haohui Mai <whe...@apache.org>
Authored: Wed May 27 18:10:47 2015 -0700
Committer: Haohui Mai <whe...@apache.org>
Committed: Fri Jun 12 13:57:01 2015 -0700

----------------------------------------------------------------------
 .../hdfs/server/namenode/DBChildrenView.java    | 31 ++++++
 .../hdfs/server/namenode/FSDirDeleteOp.java     | 37 +++++---
 .../hdfs/server/namenode/FSDirRenameOp.java     | 18 ++--
 .../server/namenode/FSDirStatAndListingOp.java  | 99 ++++++++++----------
 .../hdfs/server/namenode/FSDirectory.java       |  6 +-
 .../hdfs/server/namenode/MemDBChildrenView.java | 36 +++++++
 .../hdfs/server/namenode/ROTransaction.java     |  6 +-
 .../hdfs/server/namenode/RWTransaction.java     |  5 +-
 .../hdfs/server/namenode/Transaction.java       |  2 +-
 9 files changed, 161 insertions(+), 79 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d7972d4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DBChildrenView.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DBChildrenView.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DBChildrenView.java
new file mode 100644
index 0000000..c46acaa
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DBChildrenView.java
@@ -0,0 +1,31 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.io.Closeable;
+import java.nio.ByteBuffer;
+import java.util.Map;
+
+abstract class DBChildrenView
+    implements Closeable, Iterable<Map.Entry<ByteBuffer, Long>> {
+  abstract int size();
+  abstract void seekTo(ByteBuffer start);
+  boolean isEmpty() {
+    return size() == 0;
+  }
+}

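DBChildrenView models a directory's children as a closeable, iterable view of (name bytes -> inode id) entries: seekTo() positions a cursor, iteration walks the remaining entries, and close() lets a database-backed implementation release its cursor. A minimal self-contained sketch of the calling pattern; the View and InMemoryView classes and the sample entries are hypothetical stand-ins, only the DBChildrenView contract above comes from the patch:

import java.io.Closeable;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Iterator;
import java.util.Map;
import java.util.NavigableMap;
import java.util.TreeMap;

public class ChildrenViewDemo {
  // Local copy of the DBChildrenView contract so the sketch compiles alone.
  static abstract class View
      implements Closeable, Iterable<Map.Entry<ByteBuffer, Long>> {
    abstract int size();
    abstract void seekTo(ByteBuffer start);
    boolean isEmpty() { return size() == 0; }
  }

  // Hypothetical in-memory implementation over a sorted map.
  static class InMemoryView extends View {
    private final NavigableMap<ByteBuffer, Long> map;
    private ByteBuffer start;

    InMemoryView(NavigableMap<ByteBuffer, Long> map) { this.map = map; }

    @Override int size() { return map.size(); }
    @Override void seekTo(ByteBuffer start) { this.start = start; }
    @Override public void close() { /* nothing to release in memory */ }
    @Override public Iterator<Map.Entry<ByteBuffer, Long>> iterator() {
      // Resume from the seek position if one was set.
      return (start == null ? map : map.tailMap(start, true))
          .entrySet().iterator();
    }
  }

  static ByteBuffer name(String s) {
    return ByteBuffer.wrap(s.getBytes(StandardCharsets.UTF_8));
  }

  public static void main(String[] args) throws IOException {
    NavigableMap<ByteBuffer, Long> children = new TreeMap<>();
    children.put(name("a"), 1L);
    children.put(name("b"), 2L);
    children.put(name("c"), 3L);
    // Calling pattern used throughout the patch: open, seek, iterate, close.
    try (View v = new InMemoryView(children)) {
      v.seekTo(name("b"));
      for (Map.Entry<ByteBuffer, Long> e : v) {
        System.out.println(e.getValue());   // prints 2 then 3
      }
    }
  }
}
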
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d7972d4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
index 74dcf46..af253ad 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
@@ -26,7 +26,9 @@ import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
 import org.apache.hadoop.util.ChunkedArrayList;
 
 import java.io.IOException;
+import java.nio.ByteBuffer;
 import java.util.List;
+import java.util.Map;
 
 import static org.apache.hadoop.util.Time.now;
 
@@ -165,7 +167,8 @@ class FSDirDeleteOp {
 
     long mtime = now();
     // Unlink the target directory from directory tree
-    long filesRemoved = delete(tx, paths, collectedBlocks, removedUCFiles, mtime);
+    long filesRemoved = delete(tx, paths, collectedBlocks, removedUCFiles,
+                               mtime);
     if (filesRemoved < 0) {
       return null;
     }
@@ -216,7 +219,8 @@ class FSDirDeleteOp {
    */
   private static long unprotectedDelete(
       RWTransaction tx, Resolver.Result paths,
-      BlocksMapUpdateInfo collectedBlocks, List<Long> removedUCFiles, long mtime) {
+      BlocksMapUpdateInfo collectedBlocks, List<Long> removedUCFiles, long mtime)
+      throws IOException {
     // TODO: Update quota
     FlatINode parent = paths.inodesInPath().getLastINode(-2);
     FlatINode inode = paths.inodesInPath().getLastINode();
@@ -236,21 +240,24 @@ class FSDirDeleteOp {
 
   private static long deleteSubtree(
       RWTransaction tx, long parentId, BlocksMapUpdateInfo collectedBlocks,
-      List<Long> removedUCFiles) {
+      List<Long> removedUCFiles) throws IOException {
     long deleted = 0;
-    for (long child : tx.childrenView(parentId).values()) {
-      FlatINode node = tx.getINode(child);
-      if (node.isFile()) {
-        FlatINodeFileFeature f = node.feature(FlatINodeFileFeature.class);
-        assert f != null;
-        if (f.inConstruction()) {
-          removedUCFiles.add(child);
-        }
-        for (Block b : f.blocks()) {
-          collectedBlocks.addDeleteBlock(b);
+    try (DBChildrenView children = tx.childrenView(parentId)) {
+      for (Map.Entry<ByteBuffer, Long> e : children) {
+        long child = e.getValue();
+        FlatINode node = tx.getINode(child);
+        if (node.isFile()) {
+          FlatINodeFileFeature f = node.feature(FlatINodeFileFeature.class);
+          assert f != null;
+          if (f.inConstruction()) {
+            removedUCFiles.add(child);
+          }
+          for (Block b : f.blocks()) {
+            collectedBlocks.addDeleteBlock(b);
+          }
+        } else if (node.isDirectory()) {
+          deleted += deleteSubtree(tx, child, collectedBlocks, removedUCFiles);
         }
-      } else if (node.isDirectory()) {
-        deleted += deleteSubtree(tx, child, collectedBlocks, removedUCFiles);
       }
     }
     return deleted;

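The rewritten deleteSubtree opens one children view per recursion level and lets try-with-resources close each view as the recursion unwinds: files feed their ids and blocks into the collection lists, directories recurse. The same recursion shape over a toy in-memory namespace, with the Closeable plumbing elided; every name below is hypothetical:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.NavigableMap;
import java.util.TreeMap;

public class DeleteSubtreeSketch {
  // Toy namespace: a directory id maps to its (name -> child id) entries;
  // ids without an entry here are files.
  static final Map<Long, NavigableMap<String, Long>> DIRS = new HashMap<>();

  // Mirrors the patch's shape: iterate children, recurse into directories,
  // record files for later block cleanup, count everything removed.
  static long deleteSubtree(long dirId, List<Long> deletedFiles) {
    long deleted = 0;
    for (long child : DIRS.get(dirId).values()) {
      if (DIRS.containsKey(child)) {
        deleted += deleteSubtree(child, deletedFiles);
      } else {
        deletedFiles.add(child);
        deleted++;
      }
    }
    DIRS.remove(dirId);
    return deleted + 1;  // count the directory itself
  }

  public static void main(String[] args) {
    // Layout: /1/{f2, d3/{f4}}
    NavigableMap<String, Long> root = new TreeMap<>();
    root.put("d3", 3L);
    root.put("f2", 2L);
    DIRS.put(1L, root);
    NavigableMap<String, Long> d3 = new TreeMap<>();
    d3.put("f4", 4L);
    DIRS.put(3L, d3);

    List<Long> files = new ArrayList<>();
    System.out.println(deleteSubtree(1L, files)); // 4 inodes removed
    System.out.println(files);                    // [4, 2]
  }
}
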
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d7972d4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
index f322186..ae0b73c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
@@ -466,8 +466,8 @@ class FSDirRenameOp {
       }
       error = "Rename destination " + dst
           + " is a directory or file under source " + src;
-      NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
-                                       + error);
+      NameNode.stateChangeLog.warn(
+          "DIR* FSDirectory.unprotectedRenameTo: " + error);
       throw new IOException(error);
     }
   }
@@ -491,12 +491,14 @@ class FSDirRenameOp {
       throw new FileAlreadyExistsException(error);
     }
     if (dstInode.isDirectory()) {
-      boolean hasChildren = !tx.childrenView(dstInode.id()).isEmpty();
-      if (hasChildren) {
-        error = "rename destination directory is not empty: " + dst;
-        NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
-                                         + error);
-        throw new IOException(error);
+      try (DBChildrenView children = tx.childrenView(dstInode.id())) {
+        boolean hasChildren = !children.isEmpty();
+        if (hasChildren) {
+          error = "rename destination directory is not empty: " + dst;
+          NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
+                                           + error);
+          throw new IOException(error);
+        }
       }
     }
   }

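The emptiness probe on the rename destination now sits inside try-with-resources, so the view is closed even though the probe can throw. A small self-contained demonstration of that ordering, with hypothetical names; close() runs before the exception reaches the caller's handler:

import java.io.Closeable;
import java.io.IOException;

public class CloseOnThrowDemo {
  static class View implements Closeable {
    boolean isEmpty() { return false; }
    @Override public void close() { System.out.println("view closed"); }
  }

  // Mirrors the patch's shape: probe inside try-with-resources, throw if
  // the destination directory is non-empty.
  static void requireEmpty(View view) throws IOException {
    try (View children = view) {
      if (!children.isEmpty()) {
        throw new IOException("rename destination directory is not empty");
      }
    }
  }

  public static void main(String[] args) {
    try {
      requireEmpty(new View());
    } catch (IOException e) {
      // close() already ran: "view closed" prints before this message.
      System.out.println(e.getMessage());
    }
  }
}
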
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d7972d4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
index 0e274b3..326b296 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
@@ -243,52 +243,51 @@ class FSDirStatAndListingOp {
           }, 0);
     }
 
-    Map<ByteBuffer, Long> children = tx.childrenView(targetNode.id()).tailMap(
-        ByteBuffer.wrap(startAfter.getBytes(Charsets.UTF_8)));
-    int numOfListing = Math.min(children.size(), fsd.getLsLimit());
-    int locationBudget = fsd.getLsLimit();
-    int listingCnt = 0;
-    int i = 0;
-    HdfsFileStatus listing[] = new HdfsFileStatus[numOfListing];
-
-    for (Map.Entry<ByteBuffer, Long> e : children.entrySet()) {
-      if (locationBudget < 0 && i >= listing.length) {
-        break;
-      }
+    try (DBChildrenView children = tx.childrenView(targetNode.id())) {
+      children.seekTo(ByteBuffer.wrap(startAfter.getBytes(Charsets.UTF_8)));
+      int numOfListing = fsd.getLsLimit();
+      int locationBudget = fsd.getLsLimit();
+      int listingCnt = 0;
+      int i = 0;
+      HdfsFileStatus listing[] = new HdfsFileStatus[numOfListing];
+
+      for (Map.Entry<ByteBuffer, Long> e : children) {
+        if (locationBudget < 0 && i >= listing.length) {
+          break;
+        }
 
-      FlatINode cur = tx.getINode(e.getValue());
-      // TODO: Handle Storage policy
+        FlatINode cur = tx.getINode(e.getValue());
+        // TODO: Handle Storage policy
 //      byte curPolicy = isSuperUser && !cur.isSymlink()?
 //          cur.getLocalStoragePolicyID():
 //          HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
-      byte curPolicy = HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
-      ByteBuffer b =e.getKey().duplicate();
-      byte[] localName = new byte[b.remaining()];
-      b.get(localName);
-      listing[i] =
-          createFileStatus(tx, fsd, cur, localName, needLocation,
-                           getStoragePolicyID(curPolicy, parentStoragePolicy));
-      if (needLocation) {
-        // Once we  hit lsLimit locations, stop.
-        // This helps to prevent excessively large response payloads.
-        // Approximate #locations with locatedBlockCount() * repl_factor
-        LocatedBlocks blks =
-            ((HdfsLocatedFileStatus)listing[i]).getBlockLocations();
-        locationBudget -= (blks == null) ? 0 :
-            blks.locatedBlockCount() * listing[i].getReplication();
+        byte curPolicy = HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
+        ByteBuffer b = e.getKey().duplicate();
+        byte[] localName = new byte[b.remaining()];
+        b.get(localName);
+        listing[i] = createFileStatus(tx, fsd, cur, localName, needLocation,
+                                      getStoragePolicyID(curPolicy,
+                                                         parentStoragePolicy));
+        if (needLocation) {
+          // Once we hit lsLimit locations, stop.
+          // This helps to prevent excessively large response payloads.
+          // Approximate #locations with locatedBlockCount() * repl_factor
+          LocatedBlocks blks = ((HdfsLocatedFileStatus) listing[i]).getBlockLocations();
+          locationBudget -= (blks == null) ? 0 : blks.locatedBlockCount() * listing[i].getReplication();
+        }
+        ++i;
+        ++listingCnt;
       }
-      ++i;
-      ++listingCnt;
-    }
 
-    // truncate return array if necessary
-    if (listingCnt < numOfListing) {
-      listing = Arrays.copyOf(listing, listingCnt);
-    }
+      // truncate return array if necessary
+      if (listingCnt < numOfListing) {
+        listing = Arrays.copyOf(listing, listingCnt);
+      }
 
-    return new DirectoryListing(
-        listing,
-        listingCnt < numOfListing ? 0 : children.size() - listingCnt);
+      return new DirectoryListing(
+          listing,
+          listingCnt < numOfListing ? 0 : children.size() - listingCnt);
+    }
   }
 
   /** Get the file info for a specific file.
@@ -340,8 +339,8 @@ class FSDirStatAndListingOp {
       FSDirectory fsd, String fullPath, byte[] path, INode node,
       byte storagePolicy, int snapshot, boolean isRawPath,
       INodesInPath iip) throws IOException {
-    INodeAttributes nodeAttrs = getINodeAttributes(
-        fsd, fullPath, path, node, snapshot);
+    INodeAttributes nodeAttrs = getINodeAttributes(fsd, fullPath, path, node,
+                                                   snapshot);
     return createFileStatus(fsd, path, node, nodeAttrs,
                             storagePolicy, snapshot, isRawPath, iip);
   }
@@ -426,9 +425,12 @@ class FSDirStatAndListingOp {
       isEncrypted = false;
     }
 
-    int childrenNum = node.isDirectory()
-        ? tx.childrenView(node.id()).size()
-        : 0;
+    int childrenNum = 0;
+    if (node.isDirectory()) {
+      try (DBChildrenView children = tx.childrenView(node.id())) {
+        childrenNum = children.size();
+      }
+    }
 
     PermissionStatus perm = node.permissionStatus(fsd.ugid());
 
@@ -488,9 +490,12 @@ class FSDirStatAndListingOp {
       loc = attachFileInfo(loc, fileSize, isUc, false, feInfo);
     }
 
-    int childrenNum = node.isDirectory()
-        ? tx.childrenView(node.id()).size()
-        : 0;
+    int childrenNum = 0;
+    if (node.isDirectory()) {
+      try (DBChildrenView children = tx.childrenView(node.id())) {
+        childrenNum = children.size();
+      }
+    }
 
     PermissionStatus perm = node.permissionStatus(fsd.ugid());
     HdfsLocatedFileStatus status = new HdfsLocatedFileStatus(

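getListing can no longer size its result from a tailMap up front, so it seeks, allocates the listing array at the lsLimit bound, and truncates afterwards. The cursor arithmetic in isolation over a plain NavigableMap; the method and variable names are hypothetical, while the duplicate()/get() extraction of name bytes is the one the patch uses:

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import java.util.NavigableMap;
import java.util.TreeMap;

public class ListingPageSketch {
  static List<String> page(NavigableMap<ByteBuffer, Long> children,
                           String startAfter, int lsLimit) {
    ByteBuffer start =
        ByteBuffer.wrap(startAfter.getBytes(StandardCharsets.UTF_8));
    List<String> names = new ArrayList<>(lsLimit);
    for (ByteBuffer key : children.tailMap(start, true).keySet()) {
      if (names.size() >= lsLimit) {
        break;                         // a later call resumes from here
      }
      ByteBuffer b = key.duplicate();  // don't move the stored key's position
      byte[] localName = new byte[b.remaining()];
      b.get(localName);
      names.add(new String(localName, StandardCharsets.UTF_8));
    }
    return names;
  }

  public static void main(String[] args) {
    NavigableMap<ByteBuffer, Long> children = new TreeMap<>();
    for (String s : new String[] {"a", "b", "c", "d"}) {
      children.put(ByteBuffer.wrap(s.getBytes(StandardCharsets.UTF_8)), 0L);
    }
    System.out.println(page(children, "b", 2)); // [b, c]
  }
}
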
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d7972d4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 8fd988b..8744916 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -477,7 +477,11 @@ public class FSDirectory implements Closeable {
   static boolean isNonEmptyDirectory(
       Transaction tx, FlatINodesInPath iip) {
     FlatINode inode = iip.getLastINode();
-    return inode.isDirectory() && !tx.childrenView(inode.id()).isEmpty();
+    try (DBChildrenView children = tx.childrenView(inode.id())) {
+      return inode.isDirectory() && !children.isEmpty();
+    } catch (IOException e) {
+      throw new RuntimeException(e);
+    }
   }
 
   /**

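isNonEmptyDirectory cannot propagate the checked IOException from close(), so the patch tunnels it through a bare RuntimeException. On a codebase targeting Java 8, java.io.UncheckedIOException is the purpose-built carrier; a sketch of that alternative, with hypothetical names (an assumption about the target Java version, not what the patch does):

import java.io.Closeable;
import java.io.IOException;
import java.io.UncheckedIOException;

public class UncheckedCloseSketch {
  interface View extends Closeable {
    boolean isEmpty() throws IOException;
  }

  // Same shape as isNonEmptyDirectory: a non-throwing signature wrapping
  // a Closeable resource whose close() may throw.
  static boolean isNonEmpty(View view) {
    try (View children = view) {
      return !children.isEmpty();
    } catch (IOException e) {
      // Callers can still recover the original cause via e.getCause().
      throw new UncheckedIOException(e);
    }
  }
}
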
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d7972d4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/MemDBChildrenView.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/MemDBChildrenView.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/MemDBChildrenView.java
new file mode 100644
index 0000000..3278111
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/MemDBChildrenView.java
@@ -0,0 +1,36 @@
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.NavigableMap;
+
+class MemDBChildrenView extends DBChildrenView {
+  private final NavigableMap<ByteBuffer, Long> childrenMap;
+
+  MemDBChildrenView(NavigableMap<ByteBuffer, Long> childrenMap) {
+    this.childrenMap = childrenMap;
+  }
+
+  private ByteBuffer start;
+
+  @Override
+  public int size() {
+    return childrenMap.size();
+  }
+
+  @Override
+  public void seekTo(ByteBuffer start) {
+    this.start = start;
+  }
+
+  @Override
+  public void close() throws IOException {
+  }
+
+  @Override
+  public Iterator<Map.Entry<ByteBuffer, Long>> iterator() {
+    return childrenMap.tailMap(start).entrySet().iterator();
+  }
+}

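One subtlety: if the backing map is a TreeMap under natural ordering, tailMap(null) throws NullPointerException, and start stays null until seekTo() is called, while callers such as deleteSubtree iterate without seeking. A null-safe variant of the iterator as a standalone sketch; the class name is hypothetical:

import java.nio.ByteBuffer;
import java.util.Iterator;
import java.util.Map;
import java.util.NavigableMap;

class NullSafeChildrenIteration {
  private final NavigableMap<ByteBuffer, Long> childrenMap;
  private ByteBuffer start;   // remains null until seekTo() is called

  NullSafeChildrenIteration(NavigableMap<ByteBuffer, Long> childrenMap) {
    this.childrenMap = childrenMap;
  }

  void seekTo(ByteBuffer start) {
    this.start = start;
  }

  public Iterator<Map.Entry<ByteBuffer, Long>> iterator() {
    // Fall back to the whole map when no seek position has been set.
    NavigableMap<ByteBuffer, Long> view =
        start == null ? childrenMap : childrenMap.tailMap(start, true);
    return view.entrySet().iterator();
  }
}
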
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d7972d4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ROTransaction.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ROTransaction.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ROTransaction.java
index a135ad9..bc52b12 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ROTransaction.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ROTransaction.java
@@ -19,8 +19,6 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.IOException;
 import java.nio.ByteBuffer;
-import java.util.Map;
-import java.util.NavigableMap;
 
 class ROTransaction extends Transaction {
   ROTransaction(FSDirectory fsd) {
@@ -43,9 +41,9 @@ class ROTransaction extends Transaction {
   }
 
   @Override
-  NavigableMap<ByteBuffer, Long> childrenView(long parent) {
+  DBChildrenView childrenView(long parent) {
     DB.INodeContainer c = fsd.db().getINode(parent);
-    return c.readOnlyChildren();
+    return new MemDBChildrenView(c.readOnlyChildren());
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d7972d4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RWTransaction.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RWTransaction.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RWTransaction.java
index 14171c6..6eb8151 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RWTransaction.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RWTransaction.java
@@ -26,7 +26,6 @@ import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.HashMap;
 import java.util.Map;
-import java.util.NavigableMap;
 
 import static org.apache.hadoop.hdfs.server.namenode.INodeId.INVALID_INODE_ID;
 
@@ -65,12 +64,12 @@ class RWTransaction extends Transaction {
   }
 
   @Override
-  NavigableMap<ByteBuffer, Long> childrenView(long parent) {
+  DBChildrenView childrenView(long parent) {
     // TODO: This function only provides a read-only view for the content in
     // the DB. It needs to consider the modification in this transaction to
     // implement transactional semantic.
     DB.INodeContainer c = fsd.db().getINode(parent);
-    return c.readOnlyChildren();
+    return new MemDBChildrenView(c.readOnlyChildren());
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d7972d4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Transaction.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Transaction.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Transaction.java
index cf80103..7427793 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Transaction.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Transaction.java
@@ -36,7 +36,7 @@ abstract class Transaction implements Closeable {
   abstract FlatINode getINode(long id);
   abstract long getChild(long parentId, ByteBuffer localName);
 
-  abstract NavigableMap<ByteBuffer, Long> childrenView(long parent);
+  abstract DBChildrenView childrenView(long parent);
 
   protected FlatINode getINodeFromDB(long id) {
     DB.INodeContainer c = fsd.db().getINode(id);
