Author: kihwal
Date: Thu Nov 14 16:59:23 2013
New Revision: 1541978

URL: http://svn.apache.org/r1541978
Log:
HDFS-4995. Make getContentSummary less expensive. Contributed by Kihwal Lee.

Added:
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java   (with props)
Modified:
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1541978&r1=1541977&r2=1541978&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Thu Nov 14 16:59:23 2013
@@ -22,6 +22,8 @@ Release 0.23.10 - UNRELEASED
     HDFS-5346. Avoid unnecessary call to getNumLiveDataNodes() for each block
     during IBR processing (Ravi Prakash via kihwal)
 
+    HDFS-4995. Make getContentSummary less expensive. (kihwal)
+
   OPTIMIZATIONS
 
     HDFS-5239.  Allow FSNamesystem lock fairness to be configurable (daryn via

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java?rev=1541978&r1=1541977&r2=1541978&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java Thu Nov 14 16:59:23 2013
@@ -140,6 +140,8 @@ public class DFSConfigKeys extends Commo
   
   public static final String  DFS_LIST_LIMIT = "dfs.ls.limit";
   public static final int     DFS_LIST_LIMIT_DEFAULT = 1000;
+  public static final String  DFS_CONTENT_SUMMARY_LIMIT_KEY = "dfs.content-summary.limit";
+  public static final int     DFS_CONTENT_SUMMARY_LIMIT_DEFAULT = 0;
   public static final String  DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY = "dfs.datanode.failed.volumes.tolerated";
   public static final int     DFS_DATANODE_FAILED_VOLUMES_TOLERATED_DEFAULT = 0;
   public static final String  DFS_DATANODE_SYNCONCLOSE_KEY = "dfs.datanode.synconclose";

Added: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java?rev=1541978&view=auto
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java (added)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java Thu Nov 14 16:59:23 2013
@@ -0,0 +1,126 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class ContentSummaryComputationContext {
+  private FSDirectory dir = null;
+  private FSNamesystem fsn = null;
+  private long[] counts = null;
+  private long nextCountLimit = 0;
+  private long limitPerRun = 0;
+  private long yieldCount = 0;
+
+  /**
+   * Constructor
+   *
+   * @param dir The FSDirectory instance
+   * @param fsn The FSNamesystem instance
+   * @param limitPerRun allowed number of operations in one
+   *        locking period. 0 or a negative number means
+   *        no limit (i.e. no yielding)
+   */
+  public ContentSummaryComputationContext(FSDirectory dir,
+      FSNamesystem fsn, long limitPerRun) {
+    this.dir = dir;
+    this.fsn = fsn;
+    this.limitPerRun = limitPerRun;
+    this.nextCountLimit = limitPerRun;
+    this.counts = new long[]{0,0,0,0};
+  }
+
+  /** Constructor for blocking computation. */
+  public ContentSummaryComputationContext() {
+    this(null, null, 0);
+  }
+
+  /** Return current yield count */
+  public long getYieldCount() {
+    return yieldCount;
+  }
+
+  /**
+   * Relinquish locks held during computation for a short while
+   * and reacquire them. This will give other threads a chance
+   * to acquire the contended locks and run.
+   *
+   * @return true if locks were released and reacquired.
+   */
+  public boolean yield() {
+    // Are we set up to do this?
+    if (limitPerRun <= 0 || dir == null || fsn == null) {
+      return false;
+    }
+
+    // Have we reached the limit?
+    long currentCount = counts[0] + counts[1] + counts[2] +
+        counts[3];
+    if (currentCount <= nextCountLimit) {
+      return false;
+    }
+
+    // Update the next limit
+    nextCountLimit = currentCount + limitPerRun;
+
+    boolean hadDirReadLock = dir.hasReadLock();
+    boolean hadDirWriteLock = dir.hasWriteLock();
+    boolean hadFsnReadLock = fsn.hasReadLock();
+    boolean hadFsnWriteLock = fsn.hasWriteLock();
+
+    // sanity check.
+    if (!hadDirReadLock || !hadFsnReadLock || hadDirWriteLock ||
+        hadFsnWriteLock || dir.getReadHoldCount() != 1 ||
+        fsn.getReadHoldCount() != 1) {
+      // cannot relinquish
+      return false;
+    }
+
+    // unlock
+    dir.readUnlock();
+    fsn.readUnlock();
+
+    try {
+      Thread.sleep(1);
+    } catch (InterruptedException ie) {
+    } finally {
+      // reacquire
+      fsn.readLock();
+      dir.readLock();
+    }
+    yieldCount++;
+    return true;
+  }
+
+  /** update the content summary */
+  public void updateCounts(long sizeDelta, long filesDelta,
+      long dirsDelta, long spaceDelta) {
+    counts[0] += sizeDelta;
+    counts[1] += filesDelta;
+    counts[2] += dirsDelta;
+    counts[3] += spaceDelta;
+  }
+
+  /** Get the content counts */
+  public long[] getCounts() {
+    return counts;
+  }
+}

Propchange: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
------------------------------------------------------------------------------
    svn:eol-style = native
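
A minimal usage sketch for the new class (hypothetical caller code; FSDirectory.getContentSummary() in the next diff does the equivalent):

  // Assumes the caller already holds the FSNamesystem and FSDirectory read
  // locks exactly once, which is what yield() verifies before relinquishing.
  ContentSummaryComputationContext context =
      new ContentSummaryComputationContext(dir, fsn, 5000);
  targetNode.computeContentSummary(context);   // walks the subtree
  long[] counts = context.getCounts();         // {length, files, dirs, space}
  long yields = context.getYieldCount();       // times the locks were dropped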

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=1541978&r1=1541977&r2=1541978&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Thu Nov 14 16:59:23 2013
@@ -81,6 +81,7 @@ public class FSDirectory implements Clos
   private final int maxComponentLength;
   private final int maxDirItems;
   private final int lsLimit;  // max list limit
+  private final int contentCountLimit; // max content summary counts per run
 
   // lock to protect the directory and BlockMap
   private ReentrantReadWriteLock dirLock;
@@ -111,6 +112,14 @@ public class FSDirectory implements Clos
     return this.dirLock.getReadHoldCount() > 0;
   }
 
+  public int getReadHoldCount() {
+    return this.dirLock.getReadHoldCount();
+  }
+
+  public int getWriteHoldCount() {
+    return this.dirLock.getWriteHoldCount();
+  }
+
   /**
    * Caches frequently used file names used in {@link INode} to reuse 
    * byte[] objects and reduce heap usage.
@@ -134,6 +143,10 @@ public class FSDirectory implements Clos
         DFSConfigKeys.DFS_LIST_LIMIT, DFSConfigKeys.DFS_LIST_LIMIT_DEFAULT);
     this.lsLimit = configuredLimit>0 ?
         configuredLimit : DFSConfigKeys.DFS_LIST_LIMIT_DEFAULT;
+
+    this.contentCountLimit = conf.getInt(
+        DFSConfigKeys.DFS_CONTENT_SUMMARY_LIMIT_KEY,
+        DFSConfigKeys.DFS_CONTENT_SUMMARY_LIMIT_DEFAULT);
     
     // filesystem limits
     this.maxComponentLength = conf.getInt(
@@ -1931,7 +1944,11 @@ public class FSDirectory implements Clos
         throw new FileNotFoundException("File does not exist: " + srcs);
       }
       else {
-        return targetNode.computeContentSummary();
+        // Make it relinquish locks every time contentCountLimit entries are
+        // processed. 0 means disabled, i.e. blocking for the entire duration.
+        return targetNode.computeAndConvertContentSummary(
+            new ContentSummaryComputationContext(this, getFSNamesystem(),
+            contentCountLimit));
       }
     } finally {
       readUnlock();
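
The RPC contract is unchanged; only the NameNode-side computation can now pause. From a client, the call looks the same as before (standard FileSystem API, shown for context):

  import org.apache.hadoop.fs.ContentSummary;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  // The NameNode may yield its locks internally while computing this,
  // so other operations are no longer blocked for the whole traversal.
  ContentSummary cs = fs.getContentSummary(new Path("/user"));
  System.out.println("files=" + cs.getFileCount()
      + " dirs=" + cs.getDirectoryCount()
      + " spaceConsumed=" + cs.getSpaceConsumed());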

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1541978&r1=1541977&r2=1541978&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Thu Nov 14 16:59:23 2013
@@ -473,6 +473,14 @@ public class FSNamesystem implements Nam
     return hasReadLock() || hasWriteLock();
   }
 
+  public int getReadHoldCount() {
+    return this.fsLock.getReadHoldCount();
+  }
+
+  public int getWriteHoldCount() {
+    return this.fsLock.getWriteHoldCount();
+  }
+
   /**
    * dirs is a list of directories where the filesystem directory state 
    * is stored
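
The hold-count getters exist so yield() can refuse to relinquish when the current thread holds a lock re-entrantly: releasing a ReentrantReadWriteLock once does not free it if the same thread acquired it twice. A standalone illustration of that java.util.concurrent behavior (not code from this patch):

  import java.util.concurrent.locks.ReentrantReadWriteLock;

  ReentrantReadWriteLock lock = new ReentrantReadWriteLock(true);
  lock.readLock().lock();
  lock.readLock().lock();                // re-entrant acquisition
  assert lock.getReadHoldCount() == 2;   // a single unlock would not release it
  lock.readLock().unlock();
  lock.readLock().unlock();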

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java?rev=1541978&r1=1541977&r2=1541978&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java Thu Nov 14 16:59:23 2013
@@ -190,17 +190,26 @@ public abstract class INode implements C
    */
   abstract int collectSubtreeBlocksAndClear(List<Block> v);
 
-  /** Compute {@link ContentSummary}. */
+  /** Compute {@link ContentSummary}. Blocking computation. */
   public final ContentSummary computeContentSummary() {
-    long[] a = computeContentSummary(new long[]{0,0,0,0});
+    return computeAndConvertContentSummary(
+        new ContentSummaryComputationContext());
+  }
+
+  /** Compute {@link ContentSummary} */
+  public final ContentSummary computeAndConvertContentSummary(
+      ContentSummaryComputationContext summary) {
+    long[] a = computeContentSummary(summary).getCounts();
     return new ContentSummary(a[0], a[1], a[2], getNsQuota(), 
                               a[3], getDsQuota());
   }
   /**
-   * @return an array of three longs. 
+   * @return ContentSummaryComputationContext containing
+   * content counts.
    * 0: length, 1: file count, 2: directory count 3: disk space
    */
-  abstract long[] computeContentSummary(long[] summary);
+  abstract ContentSummaryComputationContext computeContentSummary(
+      ContentSummaryComputationContext summary);
   
   /**
    * Get the quota set for this inode

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java?rev=1541978&r1=1541977&r2=1541978&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java Thu Nov 14 16:59:23 2013
@@ -384,33 +384,60 @@ class INodeDirectory extends INode {
   }
 
   /** {@inheritDoc} */
-  long[] computeContentSummary(long[] summary) {
-    // Walk through the children of this node, using a new summary array
+  ContentSummaryComputationContext computeContentSummary(
+      ContentSummaryComputationContext summary) {
+    // Walk through the children of this node 
     // for the (sub)tree rooted at this node
-    assert 4 == summary.length;
-    long[] subtreeSummary = new long[]{0,0,0,0};
-    if (children != null) {
-      for (INode child : children) {
-        child.computeContentSummary(subtreeSummary);
+ 
+    // Save the original space count
+    long originalSpace = summary.getCounts()[3];
+    boolean stale = false;
+
+    // Explicit traversing is done to enable repositioning after relinquishing
+    // and reacquiring locks.
+    for (int i = 0; children != null && i < children.size(); i++) {
+      INode child = children.get(i);
+      byte[] childName = child.getLocalNameBytes();
+
+      long lastYieldCount = summary.getYieldCount();
+      child.computeContentSummary(summary);
+
+      // Check whether the computation was paused in the subtree.
+      // The counts may be off, but traversing the rest of the children
+      // should still be safe.
+      if (lastYieldCount == summary.getYieldCount()) {
+        continue;
+      }
+
+      // The locks were released and reacquired. Check parent first.
+      stale = true;
+      if (getParent() == null) {
+        // Stop further counting and return whatever we have so far.
+        break;
       }
+
+      // Reposition in case the children list has changed. Decrement by 1
+      // since it will be incremented when the loop continues.
+      i = nextChild(childName) - 1;
     }
-    if (this instanceof INodeDirectoryWithQuota) {
+
+    // increment the directory count for this directory.
+    summary.updateCounts(0, 0, 1, 0);
+
+    if (!stale && this instanceof INodeDirectoryWithQuota) {
       // Warn if the cached and computed diskspace values differ
       INodeDirectoryWithQuota node = (INodeDirectoryWithQuota)this;
       long space = node.diskspaceConsumed();
-      assert -1 == node.getDsQuota() || space == subtreeSummary[3];
-      if (-1 != node.getDsQuota() && space != subtreeSummary[3]) {
-        NameNode.LOG.warn("Inconsistent diskspace for directory "
-            +getLocalName()+". Cached: "+space+" Computed: "+subtreeSummary[3]);
+      long computedSpace = summary.getCounts()[3] - originalSpace;
+      if (-1 != node.getDsQuota() && space != computedSpace) {
+        NameNode.LOG.info("Inconsistent diskspace for directory "
+            +getLocalName()+". Cached: "+space+" Computed: "+computedSpace);
       }
     }
 
-    // update the passed summary array with the values for this node's subtree
-    for (int i = 0; i < summary.length; i++) {
-      summary[i] += subtreeSummary[i];
-    }
+    // Relinquish and reacquire locks if necessary.
+    summary.yield();
 
-    summary[2]++;
     return summary;
   }
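
The repositioning step depends on nextChild(childName), which is not shown in this diff. Since INodeDirectory keeps its children sorted by name, a plausible sketch of such a helper is a binary search that resumes just past the last processed name (hypothetical code, assuming INode compares against a byte[] name as the sorted children list in this class requires):

  // Returns the index at which to resume traversal after 'name'. If the
  // child is still present, resume at the next sibling; if it was removed
  // while the locks were dropped, the insertion point is the right spot.
  private int nextChild(byte[] name) {
    int pos = java.util.Collections.binarySearch(children, name);
    return pos >= 0 ? pos + 1 : -(pos + 1);
  }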
 

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java?rev=1541978&r1=1541977&r2=1541978&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java Thu Nov 14 16:59:23 2013
@@ -169,10 +169,10 @@ public class INodeFile extends INode {
   }
 
   /** {@inheritDoc} */
-  long[] computeContentSummary(long[] summary) {
-    summary[0] += computeFileSize(true);
-    summary[1]++;
-    summary[3] += diskspaceConsumed();
+  ContentSummaryComputationContext computeContentSummary(
+     ContentSummaryComputationContext summary) {
+    summary.updateCounts(computeFileSize(true), 1, 0,
+        diskspaceConsumed());
     return summary;
   }
 

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java?rev=1541978&r1=1541977&r2=1541978&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java Thu Nov 14 16:59:23 2013
@@ -68,8 +68,8 @@ public class INodeSymlink extends INode 
   }
 
   @Override
-  long[] computeContentSummary(long[] summary) {
-    summary[1]++; // Increment the file count
+  ContentSummaryComputationContext computeContentSummary(ContentSummaryComputationContext summary) {
+    summary.updateCounts(0, 1, 0, 0); // Increment the file count
     return summary;
   }
 

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java?rev=1541978&r1=1541977&r2=1541978&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java Thu Nov 14 16:59:23 2013
@@ -82,6 +82,9 @@ public class TestQuota {
     // Space quotas
     final int DEFAULT_BLOCK_SIZE = 512;
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
+    // Make it relinquish locks. When run serially, the result should
+    // be identical.
+    conf.setInt(DFSConfigKeys.DFS_CONTENT_SUMMARY_LIMIT_KEY, 2);
     conf.setBoolean("dfs.support.append", true);
     final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
     final FileSystem fs = cluster.getFileSystem();
@@ -359,6 +362,9 @@ public class TestQuota {
   @Test
   public void testNamespaceCommands() throws Exception {
     final Configuration conf = new HdfsConfiguration();
+    // Make it relinquish locks. When run serially, the result should
+    // be identical.
+    conf.setInt(DFSConfigKeys.DFS_CONTENT_SUMMARY_LIMIT_KEY, 2);
     final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
     final FileSystem fs = cluster.getFileSystem();
     assertTrue("Not a HDFS: "+fs.getUri(),
@@ -532,6 +538,9 @@ public class TestQuota {
     // diskspace quotas
     conf.set(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, "512");
     conf.setBoolean("dfs.support.append", true);
+    // Make it relinquish locks. When run serially, the result should
+    // be identical.
+    conf.setInt(DFSConfigKeys.DFS_CONTENT_SUMMARY_LIMIT_KEY, 2);
     final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
     final FileSystem fs = cluster.getFileSystem();
     assertTrue("Not a HDFS: "+fs.getUri(),
@@ -786,6 +795,9 @@ public class TestQuota {
     final int BLOCK_SIZE = 6 * 1024;
     conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
     conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
+    // Make it relinquish locks. When run serially, the result should
+    // be identical.
+    conf.setInt(DFSConfigKeys.DFS_CONTENT_SUMMARY_LIMIT_KEY, 2);
     MiniDFSCluster cluster = 
       new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
     cluster.waitActive();
@@ -848,6 +860,9 @@ public class TestQuota {
     final int BLOCK_SIZE = 6 * 1024;
     conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
     conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
+    // Make it relinquish locks. When run serially, the result should
+    // be identical.
+    conf.setInt(DFSConfigKeys.DFS_CONTENT_SUMMARY_LIMIT_KEY, 2);
     MiniDFSCluster cluster = 
       new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
     cluster.waitActive();

