Author: shv
Date: Tue Nov 20 02:00:20 2012
New Revision: 1411508

URL: http://svn.apache.org/viewvc?rev=1411508&view=rev
Log:
HDFS-4179. BackupNode: allow reads, fix checkpointing, safeMode. (shv)

Modified:
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1411508&r1=1411507&r2=1411508&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Tue Nov 20 02:00:20 2012
@@ -619,6 +619,8 @@ Release 2.0.3-alpha - Unreleased 
 
     HDFS-4178. Shell scripts should not close stderr (Andy Isaacson via daryn)
 
+    HDFS-4179. BackupNode: allow reads, fix checkpointing, safeMode. (shv)
+
 Release 2.0.2-alpha - 2012-09-07 
 
   INCOMPATIBLE CHANGES

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java?rev=1411508&r1=1411507&r2=1411508&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java Tue Nov 20 02:00:20 2012
@@ -69,6 +69,8 @@ public class BackupNode extends NameNode
   private static final String BN_HTTP_ADDRESS_NAME_KEY = DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY;
   private static final String BN_HTTP_ADDRESS_DEFAULT = DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_DEFAULT;
   private static final String BN_SERVICE_RPC_ADDRESS_KEY = DFSConfigKeys.DFS_NAMENODE_BACKUP_SERVICE_RPC_ADDRESS_KEY;
+  private static final float  BN_SAFEMODE_THRESHOLD_PCT_DEFAULT = 1.5f;
+  private static final int    BN_SAFEMODE_EXTENSION_DEFAULT = Integer.MAX_VALUE;
 
   /** Name-node proxy */
   NamenodeProtocol namenode;
@@ -127,6 +129,10 @@ public class BackupNode extends NameNode
 
   @Override // NameNode
   protected void loadNamesystem(Configuration conf) throws IOException {
+    conf.setFloat(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY,
+                                BN_SAFEMODE_THRESHOLD_PCT_DEFAULT);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY,
+                                BN_SAFEMODE_EXTENSION_DEFAULT);
     BackupImage bnImage = new BackupImage(conf);
     this.namesystem = new FSNamesystem(conf, bnImage);
     bnImage.setNamesystem(namesystem);
@@ -423,9 +429,9 @@ public class BackupNode extends NameNode
         return;
       }
       if (OperationCategory.JOURNAL != op &&
-          !(OperationCategory.READ == op && allowStaleStandbyReads)) {
+          !(OperationCategory.READ == op && !isRole(NamenodeRole.CHECKPOINT))) {
         String msg = "Operation category " + op
-            + " is not supported at the BackupNode";
+            + " is not supported at " + getRole();
         throw new StandbyException(msg);
       }
     }
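
The two constants introduced above are what pin the BackupNode in safe mode: a block threshold of 1.5 can never be reached (at most 100% of blocks can be reported safe), and the Integer.MAX_VALUE extension rules out a timed exit. A minimal sketch of the usual threshold test, with hypothetical, simplified names rather than the real FSNamesystem internals:

    // Illustrative sketch only; field and method names are hypothetical
    // simplifications of the safe-mode accounting.
    class SafeModeSketch {
      long blockTotal;  // blocks known to the namespace
      long blockSafe;   // blocks with enough reported replicas

      boolean thresholdReached(float thresholdPct) {
        // With thresholdPct = 1.5f this can never be true, since
        // blockSafe <= blockTotal, so the node stays in safe mode.
        return blockSafe >= (long) (thresholdPct * blockTotal);
      }
    }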

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java?rev=1411508&r1=1411507&r2=1411508&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java Tue Nov 20 02:00:20 2012
@@ -206,6 +206,7 @@ class Checkpointer extends Daemon {
     RemoteEditLogManifest manifest =
      getRemoteNamenodeProxy().getEditLogManifest(bnImage.getLastAppliedTxId() + 1);
 
+    boolean needReloadImage = false;
     if (!manifest.getLogs().isEmpty()) {
       RemoteEditLog firstRemoteLog = manifest.getLogs().get(0);
       // we don't have enough logs to roll forward using only logs. Need
@@ -218,13 +219,10 @@ class Checkpointer extends Daemon {
             bnStorage, true);
         bnImage.saveDigestAndRenameCheckpointImage(
             sig.mostRecentCheckpointTxId, downloadedHash);
-        
-        LOG.info("Loading image with txid " + sig.mostRecentCheckpointTxId);
-        File file = bnStorage.findImageFile(sig.mostRecentCheckpointTxId);
-        bnImage.reloadFromImageFile(file, backupNode.getNamesystem());
+        lastApplied = sig.mostRecentCheckpointTxId;
+        needReloadImage = true;
       }
-      
-      lastApplied = bnImage.getLastAppliedTxId();
+
       if (firstRemoteLog.getStartTxId() > lastApplied + 1) {
         throw new IOException("No logs to roll forward from " + lastApplied);
       }
@@ -234,7 +232,12 @@ class Checkpointer extends Daemon {
         TransferFsImage.downloadEditsToStorage(
             backupNode.nnHttpAddress, log, bnStorage);
       }
-  
+
+      if(needReloadImage) {
+        LOG.info("Loading image with txid " + sig.mostRecentCheckpointTxId);
+        File file = bnStorage.findImageFile(sig.mostRecentCheckpointTxId);
+        bnImage.reloadFromImageFile(file, backupNode.getNamesystem());
+      }
       rollForwardByApplyingLogs(manifest, bnImage, backupNode.getNamesystem());
     }
     
@@ -243,8 +246,9 @@ class Checkpointer extends Daemon {
     backupNode.namesystem.writeLock();
     try {
       backupNode.namesystem.dir.setReady();
-      backupNode.namesystem.setBlockTotal();
-      
+      if(backupNode.namesystem.getBlocksTotal() > 0) {
+        backupNode.namesystem.setBlockTotal();
+      }
       bnImage.saveFSImageInAllDirs(backupNode.getNamesystem(), txid);
       bnStorage.writeAll();
     } finally {
@@ -284,12 +288,12 @@ class Checkpointer extends Daemon {
   
     List<EditLogInputStream> editsStreams = Lists.newArrayList();    
     for (RemoteEditLog log : manifest.getLogs()) {
-      File f = dstStorage.findFinalizedEditsFile(
-          log.getStartTxId(), log.getEndTxId());
-      if (log.getStartTxId() > dstImage.getLastAppliedTxId()) {
+      if (log.getEndTxId() > dstImage.getLastAppliedTxId()) {
+        File f = dstStorage.findFinalizedEditsFile(
+            log.getStartTxId(), log.getEndTxId());
         editsStreams.add(new EditLogFileInputStream(f, log.getStartTxId(), 
                                                     log.getEndTxId(), true));
-       }
+      }
     }
     LOG.info("Checkpointer about to load edits from " +
         editsStreams.size() + " stream(s).");
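
The net effect of the Checkpointer hunks above is a reordering of the download sequence: the checkpoint image is no longer reloaded before all edit segments have been fetched. A condensed sketch of the corrected flow (simplified from doCheckpoint() in the diff; error handling and the exact "image too old" condition are abbreviated):

    // Condensed from the diff above; not the complete method.
    boolean needReloadImage = false;
    if (firstRemoteLog.getStartTxId() > bnImage.getLastAppliedTxId() + 1) {
      // 1. Download the checkpoint image, but defer loading it.
      lastApplied = sig.mostRecentCheckpointTxId;
      needReloadImage = true;
    }
    // 2. Fetch ALL edit segments before touching the namespace.
    for (RemoteEditLog log : manifest.getLogs()) {
      TransferFsImage.downloadEditsToStorage(
          backupNode.nnHttpAddress, log, bnStorage);
    }
    // 3. Only now reload the image, then roll forward through the edits.
    if (needReloadImage) {
      File file = bnStorage.findImageFile(sig.mostRecentCheckpointTxId);
      bnImage.reloadFromImageFile(file, backupNode.getNamesystem());
    }
    rollForwardByApplyingLogs(manifest, bnImage, backupNode.getNamesystem());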

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java?rev=1411508&r1=1411507&r2=1411508&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java Tue Nov 20 02:00:20 2012
@@ -38,7 +38,6 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
-import org.apache.hadoop.hdfs.server.common.GenerationStamp;
 import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.Storage.FormatConfirmable;
@@ -1020,6 +1019,7 @@ public class FSImage implements Closeabl
   NamenodeCommand startCheckpoint(NamenodeRegistration bnReg, // backup node
                                  NamenodeRegistration nnReg) // active name-node
   throws IOException {
+    LOG.info("Start checkpoint at txid " + getEditLog().getLastWrittenTxId());
     String msg = null;
     // Verify that checkpoint is allowed
     if(bnReg.getNamespaceID() != storage.getNamespaceID())
@@ -1059,6 +1059,7 @@ public class FSImage implements Closeabl
    * @throws IOException if the checkpoint fields are inconsistent
    */
   void endCheckpoint(CheckpointSignature sig) throws IOException {
+    LOG.info("End checkpoint at txid " + getEditLog().getLastWrittenTxId());
     sig.validateStorageInfo(this);
   }
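
These two INFO lines bracket each checkpoint in the name-node log, making it easy to correlate a BackupNode checkpoint with the transaction range it covered. Sample output (txid values hypothetical, format taken from the statements above):

    Start checkpoint at txid 1234
    End checkpoint at txid 1240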
 

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1411508&r1=1411507&r2=1411508&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Tue Nov 20 02:00:20 2012
@@ -3994,7 +3994,8 @@ public class FSNamesystem implements Nam
         // of the number of total blocks in the system.
         this.shouldIncrementallyTrackBlocks = true;
       }
-      
+      if(blockSafe < 0)
+        this.blockSafe = 0;
       checkMode();
     }
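
A note on the clamp above: the surrounding context (shouldIncrementallyTrackBlocks) suggests the safe-block count is tracked incrementally here, so blockSafe can apparently drift below zero; resetting it before checkMode() keeps the threshold comparison sane. Restated with a comment, this is the guard from the hunk, not new behavior:

    // Clamp before re-evaluating safe mode: a negative safe-block count
    // would distort the "blockSafe >= threshold" comparison.
    if (blockSafe < 0) {
      this.blockSafe = 0;
    }
    checkMode();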
       

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java?rev=1411508&r1=1411507&r2=1411508&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java Tue Nov 20 02:00:20 2012
@@ -40,6 +40,7 @@ import org.apache.hadoop.hdfs.DFSTestUti
 import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile;
@@ -99,7 +100,10 @@ public class TestBackupNode {
     c.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY,
         "127.0.0.1:0");
 
-    return (BackupNode)NameNode.createNameNode(new String[]{startupOpt.getName()}, c);
+    BackupNode bn = (BackupNode)NameNode.createNameNode(
+        new String[]{startupOpt.getName()}, c);
+    assertTrue(bn.getRole() + " must be in SafeMode.", bn.isInSafeMode());
+    return bn;
   }
 
   void waitCheckpointDone(MiniDFSCluster cluster, long txid) {
@@ -358,11 +362,22 @@ public class TestBackupNode {
         DFSTestUtil.createFile(bnFS, file3, fileSize, fileSize, blockSize,
             replication, seed);
       } catch (IOException eio) {
-        LOG.info("Write to BN failed as expected: ", eio);
+        LOG.info("Write to " + backup.getRole() + " failed as expected: ", eio);
         canWrite = false;
       }
       assertFalse("Write to BackupNode must be prohibited.", canWrite);
 
+      // Reads are allowed for BackupNode, but not for CheckpointNode
+      boolean canRead = true;
+      try {
+        bnFS.exists(file2);
+      } catch (IOException eio) {
+        LOG.info("Read from " + backup.getRole() + " failed: ", eio);
+        canRead = false;
+      }
+      assertEquals("Reads to BackupNode are allowed, but not CheckpointNode.",
+          canRead, backup.isRole(NamenodeRole.BACKUP));
+
       DFSTestUtil.createFile(fileSys, file3, fileSize, fileSize, blockSize,
           replication, seed);
       
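The new read check mirrors the checkOperation() change in BackupNode.java earlier in this commit: reads succeed exactly when the node's role is BACKUP and fail on a CHECKPOINT node. Condensed, the gate being exercised is:

    // Condensed from the BackupNode diff above: journal ops always pass;
    // reads pass unless the role is CHECKPOINT; all else is rejected.
    if (OperationCategory.JOURNAL != op &&
        !(OperationCategory.READ == op && !isRole(NamenodeRole.CHECKPOINT))) {
      throw new StandbyException("Operation category " + op
          + " is not supported at " + getRole());
    }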

