Repository: hadoop
Updated Branches:
  refs/heads/YARN-1197 23f28dfe6 -> 1d1905e90 (forced update)


HDFS-8856. Make LeaseManager#countPath O(1). (Contributed by Arpit Agarwal)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6d4eee71
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6d4eee71
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6d4eee71

Branch: refs/heads/YARN-1197
Commit: 6d4eee718a3fe1450a627128eb94728011bd9b68
Parents: 8572a5a
Author: Arpit Agarwal <a...@apache.org>
Authored: Thu Aug 6 18:51:28 2015 -0700
Committer: Arpit Agarwal <a...@apache.org>
Committed: Thu Aug 6 18:51:28 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  2 +
 .../hdfs/server/namenode/Checkpointer.java      |  4 +-
 .../hdfs/server/namenode/FSNamesystem.java      | 14 +++--
 .../hdfs/server/namenode/LeaseManager.java      | 17 ++---
 .../hdfs/server/namenode/TestLeaseManager.java  | 65 +++++++++++++++++---
 5 files changed, 79 insertions(+), 23 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d4eee71/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 138ed10..051dc8a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -767,6 +767,8 @@ Release 2.8.0 - UNRELEASED
     HDFS-8815. DFS getStoragePolicy implementation using single RPC call
     (Surendra Singh Lilhore via vinayakumarb)
 
+    HDFS-8856. Make LeaseManager#countPath O(1). (Arpit Agarwal)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d4eee71/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java
index 25b87f0..9087629 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java
@@ -254,7 +254,9 @@ class Checkpointer extends Daemon {
     try {
       backupNode.namesystem.setImageLoaded();
       if(backupNode.namesystem.getBlocksTotal() > 0) {
-        backupNode.namesystem.setBlockTotal();
+        long completeBlocksTotal =
+            backupNode.namesystem.getCompleteBlocksTotal();
+        backupNode.namesystem.setBlockTotal(completeBlocksTotal);
       }
       bnImage.saveFSImageInAllDirs(backupNode.getNamesystem(), txid);
       if (!backupNode.namesystem.isRollingUpgrade()) {

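As context for the hunk above: setBlockTotal() previously took no argument and recomputed the complete-block count internally via getCompleteBlocksTotal(), which takes the FSNamesystem read lock and walks the lease manager. With the new signature the caller pays that cost once and passes the value in. A rough sketch of the two call shapes (the namesystem reference name is illustrative only):

    // Old shape: the total is recomputed inside the call.
    //   namesystem.setBlockTotal();
    //
    // New shape: compute the complete-block total once, then pass it in.
    long completeBlocksTotal = namesystem.getCompleteBlocksTotal();
    namesystem.setBlockTotal(completeBlocksTotal);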
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d4eee71/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 1cde47c..4cc3073 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -1042,9 +1042,10 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       assert safeMode != null && !isPopulatingReplQueues();
       StartupProgress prog = NameNode.getStartupProgress();
       prog.beginPhase(Phase.SAFEMODE);
+      long completeBlocksTotal = getCompleteBlocksTotal();
       prog.setTotal(Phase.SAFEMODE, STEP_AWAITING_REPORTED_BLOCKS,
-        getCompleteBlocksTotal());
-      setBlockTotal();
+          completeBlocksTotal);
+      setBlockTotal(completeBlocksTotal);
       blockManager.activate(conf);
     } finally {
       writeUnlock();
@@ -4686,12 +4687,12 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   /**
    * Set the total number of blocks in the system. 
    */
-  public void setBlockTotal() {
+  public void setBlockTotal(long completeBlocksTotal) {
     // safeMode is volatile, and may be set to null at any time
     SafeModeInfo safeMode = this.safeMode;
     if (safeMode == null)
       return;
-    safeMode.setBlockTotal((int) getCompleteBlocksTotal());
+    safeMode.setBlockTotal((int) completeBlocksTotal);
   }
 
   /**
@@ -4723,13 +4724,14 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   /**
    * Get the total number of COMPLETE blocks in the system.
    * For safe mode only complete blocks are counted.
+   * This is invoked only during NN startup and checkpointing.
    */
-  private long getCompleteBlocksTotal() {
+  public long getCompleteBlocksTotal() {
     // Calculate number of blocks under construction
     long numUCBlocks = 0;
     readLock();
-    numUCBlocks = leaseManager.getNumUnderConstructionBlocks();
     try {
+      numUCBlocks = leaseManager.getNumUnderConstructionBlocks();
       return getBlocksTotal() - numUCBlocks;
     } finally {
       readUnlock();

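Two things happen in the FSNamesystem hunks besides widening the visibility of getCompleteBlocksTotal(): the value is now computed once and shared by the startup-progress counter and safe mode, and the call to leaseManager.getNumUnderConstructionBlocks() moves inside the try block so that readUnlock() in the finally clause is guaranteed even if the lease scan throws. A minimal sketch of that lock-safety pattern, assuming FSNamesystem-style readLock()/readUnlock() helpers:

    readLock();
    try {
      // Anything that can throw belongs inside the try block; if it ran
      // between readLock() and try, a failure would leak the read lock.
      long numUCBlocks = leaseManager.getNumUnderConstructionBlocks();
      return getBlocksTotal() - numUCBlocks;
    } finally {
      readUnlock();  // always released, even on an exception above
    }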
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d4eee71/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
index f954a58..1a1edaf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
@@ -22,6 +22,7 @@ import static org.apache.hadoop.util.Time.monotonicNow;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.Collections;
 import java.util.Comparator;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -128,15 +129,13 @@ public class LeaseManager {
 
   /** @return the number of leases currently in the system */
   @VisibleForTesting
-  public synchronized int countLease() {return sortedLeases.size();}
+  public synchronized int countLease() {
+    return sortedLeases.size();
+  }
 
   /** @return the number of paths contained in all leases */
-  synchronized int countPath() {
-    int count = 0;
-    for (Lease lease : sortedLeases) {
-      count += lease.getFiles().size();
-    }
-    return count;
+  synchronized long countPath() {
+    return leasesById.size();
   }
 
   /**
@@ -280,7 +279,9 @@ public class LeaseManager {
       return holder.hashCode();
     }
     
-    private Collection<Long> getFiles() { return files; }
+    private Collection<Long> getFiles() {
+      return Collections.unmodifiableCollection(files);
+    }
 
     String getHolder() {
       return holder;

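The core of HDFS-8856 is in the hunk above: countPath() used to iterate every lease in sortedLeases and sum getFiles().size(), which is O(n) in the number of open files, and it now simply returns leasesById.size(). The invariant that makes this valid is that leasesById keys leases by inode id, so it holds exactly one entry per leased path. A simplified, self-contained sketch of that idea (TinyLeaseManager and its fields are illustrative, not the real HDFS classes):

    import java.util.HashMap;
    import java.util.Map;

    // Illustrative only: one map entry per open file makes countPath O(1).
    class TinyLeaseManager {
      // inode id -> lease holder; exactly one entry per leased path.
      private final Map<Long, String> leasesById = new HashMap<>();

      synchronized void addLease(String holder, long inodeId) {
        leasesById.put(inodeId, holder);      // duplicate adds are idempotent
      }

      synchronized void removeLease(long inodeId) {
        leasesById.remove(inodeId);
      }

      // O(1): the map size is the path count, no per-lease iteration.
      synchronized long countPath() {
        return leasesById.size();
      }
    }

The same hunk also wraps getFiles() in Collections.unmodifiableCollection(), so callers can no longer mutate a lease's file set behind the manager's back.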
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d4eee71/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLeaseManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLeaseManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLeaseManager.java
index 96907f8..de30161 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLeaseManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLeaseManager.java
@@ -17,19 +17,28 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import static org.hamcrest.CoreMatchers.is;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
 
 import com.google.common.collect.Lists;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.permission.PermissionStatus;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
+import org.junit.Rule;
 import org.junit.Test;
-import org.mockito.Mockito;
+import org.junit.rules.Timeout;
 
 import java.util.ArrayList;
 
+import static org.junit.Assert.assertThat;
 import static org.mockito.Mockito.*;
 
 public class TestLeaseManager {
+  @Rule
+  public Timeout timeout = new Timeout(300000);
+
   @Test
   public void testRemoveLeases() throws Exception {
     FSNamesystem fsn = mock(FSNamesystem.class);
@@ -52,14 +61,9 @@ public class TestLeaseManager {
   * leases, the Namenode doesn't enter an infinite loop while holding the FSN
    * write lock and thus become unresponsive
    */
-  @Test (timeout=1000)
+  @Test
   public void testCheckLeaseNotInfiniteLoop() {
-    FSDirectory dir = Mockito.mock(FSDirectory.class);
-    FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
-    Mockito.when(fsn.isRunning()).thenReturn(true);
-    Mockito.when(fsn.hasWriteLock()).thenReturn(true);
-    Mockito.when(fsn.getFSDirectory()).thenReturn(dir);
-    LeaseManager lm = new LeaseManager(fsn);
+    LeaseManager lm = new LeaseManager(makeMockFsNameSystem());
 
     //Make sure the leases we are going to add exceed the hard limit
     lm.setLeasePeriod(0, 0);
@@ -73,4 +77,49 @@ public class TestLeaseManager {
     //Initiate a call to checkLease. This should exit within the test timeout
     lm.checkLeases();
   }
+
+
+  @Test
+  public void testCountPath() {
+    LeaseManager lm = new LeaseManager(makeMockFsNameSystem());
+
+    lm.addLease("holder1", 1);
+    assertThat(lm.countPath(), is(1L));
+
+    lm.addLease("holder2", 2);
+    assertThat(lm.countPath(), is(2L));
+    lm.addLease("holder2", 2);                   // Duplicate addition
+    assertThat(lm.countPath(), is(2L));
+
+    assertThat(lm.countPath(), is(2L));
+
+    // Remove a couple of non-existing leases. countPath should not change.
+    lm.removeLease("holder2", stubInodeFile(3));
+    lm.removeLease("InvalidLeaseHolder", stubInodeFile(1));
+    assertThat(lm.countPath(), is(2L));
+
+    INodeFile file = stubInodeFile(1);
+    lm.reassignLease(lm.getLease(file), file, "holder2");
+    assertThat(lm.countPath(), is(2L));          // Count unchanged on reassign
+
+    lm.removeLease("holder2", stubInodeFile(2)); // Remove existing
+    assertThat(lm.countPath(), is(1L));
+  }
+
+  private static FSNamesystem makeMockFsNameSystem() {
+    FSDirectory dir = mock(FSDirectory.class);
+    FSNamesystem fsn = mock(FSNamesystem.class);
+    when(fsn.isRunning()).thenReturn(true);
+    when(fsn.hasWriteLock()).thenReturn(true);
+    when(fsn.getFSDirectory()).thenReturn(dir);
+    return fsn;
+  }
+
+  private static INodeFile stubInodeFile(long inodeId) {
+    PermissionStatus p = new PermissionStatus(
+        "dummy", "dummy", new FsPermission((short) 0777));
+    return new INodeFile(
+        inodeId, "/foo".getBytes(), p, 0L, 0L,
+        BlockInfo.EMPTY_ARRAY, (short) 1, 1L);
+  }
 }
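A note on the test changes above: the per-method @Test(timeout=1000) annotation is replaced by a class-level JUnit Timeout rule, which applies the same 300-second cap to every test in the class, including the new testCountPath. A minimal sketch of that JUnit 4 pattern (class and method names are illustrative):

    import org.junit.Rule;
    import org.junit.Test;
    import org.junit.rules.Timeout;

    public class ExampleTimeoutTest {
      // One rule caps every @Test method in this class at 300 seconds.
      @Rule
      public Timeout globalTimeout = new Timeout(300000);  // milliseconds

      @Test
      public void runsUnderTheSharedTimeout() {
        // test body; fails if it exceeds the class-wide timeout
      }
    }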
