Repository: hadoop
Updated Branches:
  refs/heads/branch-2 0b36dcda7 -> 4cee535aa


HDFS-11085. Add unit test for NameNode failing to start when name dir is 
unwritable. Contributed by Xiaobing Zhou.

(cherry picked from commit 0c0ab102ab392ba07ed2aa8d8a67eef4c20cad9b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4cee535a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4cee535a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4cee535a

Branch: refs/heads/branch-2
Commit: 4cee535aa56d485d02181715969ad2bfb70af246
Parents: 0b36dcd
Author: Mingliang Liu <lium...@apache.org>
Authored: Fri Nov 4 14:34:40 2016 -0700
Committer: Mingliang Liu <lium...@apache.org>
Committed: Fri Nov 4 14:39:54 2016 -0700

----------------------------------------------------------------------
 .../hdfs/server/namenode/TestStartup.java       | 55 +++++++++++++++++++-
 1 file changed, 54 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4cee535a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
index 068e63a..2fe25e0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
@@ -19,8 +19,12 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 import static 
org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption.IMPORT;
 import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI;
+import static org.hamcrest.CoreMatchers.allOf;
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.CoreMatchers.is;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
@@ -29,6 +33,8 @@ import java.io.IOException;
 import java.lang.management.ManagementFactory;
 import java.net.InetAddress;
 import java.net.URI;
+import java.nio.file.Paths;
+import java.util.Collection;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Random;
@@ -52,6 +58,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
+import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
@@ -125,7 +132,7 @@ public class TestStartup {
         fileAsURI(new File(hdfsDir, "secondary")).toString());
     config.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
               WILDCARD_HTTP_HOST + "0");
-    
+
     FileSystem.setDefaultUri(config, "hdfs://"+NAME_NODE_HOST + "0");
   }
 
@@ -661,6 +668,52 @@ public class TestStartup {
     }
   }
 
+  @Test(timeout = 30000)
+  public void testNNFailToStartOnReadOnlyNNDir() throws Exception {
+    /* set NN dir */
+    final String nnDirStr = Paths.get(
+        hdfsDir.toString(),
+        GenericTestUtils.getMethodName(), "name").toString();
+    config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nnDirStr);
+
+    try(MiniDFSCluster cluster = new MiniDFSCluster.Builder(config)
+        .numDataNodes(1)
+        .manageNameDfsDirs(false)
+        .build()) {
+      cluster.waitActive();
+
+      /* get and verify NN dir */
+      final Collection<URI> nnDirs = FSNamesystem.getNamespaceDirs(config);
+      assertNotNull(nnDirs);
+      assertTrue(nnDirs.iterator().hasNext());
+      assertEquals(
+          "NN dir should be created after NN startup.",
+          nnDirStr,
+          nnDirs.iterator().next().getPath());
+      final File nnDir = new File(nnDirStr);
+      assertTrue(nnDir.exists());
+      assertTrue(nnDir.isDirectory());
+
+      try {
+        /* set read only */
+        assertTrue(
+            "Setting NN dir read only should succeed.",
+            nnDir.setReadOnly());
+        cluster.restartNameNodes();
+        fail("Restarting NN should fail on read only NN dir.");
+      } catch (InconsistentFSStateException e) {
+        assertThat(e.toString(), is(allOf(
+            containsString("InconsistentFSStateException"),
+            containsString(nnDirStr),
+            containsString("in an inconsistent state"),
+            containsString(
+                "storage directory does not exist or is not accessible."))));
+      } finally {
+        /* set back to writable in order to clean it */
+        assertTrue("Setting NN dir should succeed.", nnDir.setWritable(true));
+      }
+    }
+  }
 
   /**
    * Verify the following scenario.


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org

Reply via email to