Author: cmccabe
Date: Thu Mar  6 08:27:55 2014
New Revision: 1574799

URL: http://svn.apache.org/r1574799
Log:
HDFS-6061. Allow dfs.datanode.shared.file.descriptor.path to contain multiple 
entries and fall back when needed (cmccabe)
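
For context, a minimal usage sketch of the renamed setting, using only the standard
org.apache.hadoop.conf.Configuration API; the property name and default values come from
this change, while the class name and surrounding setup are illustrative:

    import org.apache.hadoop.conf.Configuration;

    public class ShmPathsExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Several candidate directories may now be listed, comma-separated.
        conf.set("dfs.datanode.shared.file.descriptor.paths", "/dev/shm,/tmp");
        // getTrimmedStrings() splits on commas and trims whitespace, yielding
        // {"/dev/shm", "/tmp"}; per this change, the DataNode can fall back
        // from one entry to another when needed.
        for (String dir : conf.getTrimmedStrings(
            "dfs.datanode.shared.file.descriptor.paths")) {
          System.out.println("candidate shared-memory directory: " + dir);
        }
      }
    }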

Modified:
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/   (props changed)
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/   (props changed)
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/   (props changed)
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ShortCircuitRegistry.java
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/TestShortCircuitShm.java

Propchange: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project:r1574796

Propchange: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1574796

Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1574799&r1=1574798&r2=1574799&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Thu Mar  6 08:27:55 2014
@@ -124,6 +124,9 @@ Release 2.4.0 - UNRELEASED
     HDFS-6044. Add property for setting the NFS look up time for users
     (brandonli)
 
+    HDFS-6061. Allow dfs.datanode.shared.file.descriptor.path to contain
+    multiple entries and fall back when needed (cmccabe)
+
   OPTIMIZATIONS
 
     HDFS-5790. LeaseManager.findPath is very slow when many leases need recovery

Propchange: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1574796

Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java?rev=1574799&r1=1574798&r2=1574799&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java Thu Mar  6 08:27:55 2014
@@ -473,8 +473,8 @@ public class DFSConfigKeys extends Commo
   public static final String  DFS_NAMENODE_STARTUP_KEY = "dfs.namenode.startup";
   public static final String  DFS_DATANODE_KEYTAB_FILE_KEY = "dfs.datanode.keytab.file";
   public static final String  DFS_DATANODE_USER_NAME_KEY = "dfs.datanode.kerberos.principal";
-  public static final String  DFS_DATANODE_SHARED_FILE_DESCRIPTOR_PATH = "dfs.datanode.shared.file.descriptor.path";
-  public static final String  DFS_DATANODE_SHARED_FILE_DESCRIPTOR_PATH_DEFAULT = "/dev/shm";
+  public static final String  DFS_DATANODE_SHARED_FILE_DESCRIPTOR_PATHS = "dfs.datanode.shared.file.descriptor.paths";
+  public static final String  DFS_DATANODE_SHARED_FILE_DESCRIPTOR_PATHS_DEFAULT = "/dev/shm,/tmp";
   public static final String  DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS = "dfs.short.circuit.shared.memory.watcher.interrupt.check.ms";
   public static final int     DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS_DEFAULT = 60000;
   public static final String  DFS_NAMENODE_KEYTAB_FILE_KEY = "dfs.namenode.keytab.file";

Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ShortCircuitRegistry.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ShortCircuitRegistry.java?rev=1574799&r1=1574798&r2=1574799&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ShortCircuitRegistry.java (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ShortCircuitRegistry.java Thu Mar  6 08:27:55 2014
@@ -17,14 +17,15 @@
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SHARED_FILE_DESCRIPTOR_PATH;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SHARED_FILE_DESCRIPTOR_PATH_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SHARED_FILE_DESCRIPTOR_PATHS;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SHARED_FILE_DESCRIPTOR_PATHS_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS_DEFAULT;
 
 import java.io.Closeable;
 import java.io.FileInputStream;
 import java.io.IOException;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.Set;
@@ -45,7 +46,9 @@ import org.apache.hadoop.net.unix.Domain
 import org.apache.hadoop.net.unix.DomainSocketWatcher;
 
 import com.google.common.base.Preconditions;
+import com.google.common.base.Splitter;
 import com.google.common.collect.HashMultimap;
+import com.google.common.collect.Iterables;
 
 /*
  * Manages client short-circuit memory segments on the DataNode.
@@ -149,38 +152,35 @@ public class ShortCircuitRegistry {
     SharedFileDescriptorFactory shmFactory = null;
     DomainSocketWatcher watcher = null;
     try {
-      String loadingFailureReason =
-          SharedFileDescriptorFactory.getLoadingFailureReason();
-      if (loadingFailureReason != null) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Disabling ShortCircuitRegistry because " +
-                    loadingFailureReason);
-        }
-        return;
-      }
-      String shmPath = conf.get(DFS_DATANODE_SHARED_FILE_DESCRIPTOR_PATH,
-          DFS_DATANODE_SHARED_FILE_DESCRIPTOR_PATH_DEFAULT);
-      if (shmPath.isEmpty()) {
-        LOG.debug("Disabling ShortCircuitRegistry because shmPath was not 
set.");
-        return;
-      }
       int interruptCheck = conf.getInt(
           DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS,
           DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS_DEFAULT);
       if (interruptCheck <= 0) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Disabling ShortCircuitRegistry because " +
-                    "interruptCheckMs was set to " + interruptCheck);
-        }
-        return;
+        throw new IOException(
+            DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS +
+            " was set to " + interruptCheck);
+      }
+      String shmPaths[] =
+          conf.getTrimmedStrings(DFS_DATANODE_SHARED_FILE_DESCRIPTOR_PATHS);
+      if (shmPaths.length == 0) {
+        shmPaths =
+            DFS_DATANODE_SHARED_FILE_DESCRIPTOR_PATHS_DEFAULT.split(",");
+      }
+      shmFactory = SharedFileDescriptorFactory.
+          create("HadoopShortCircuitShm_", shmPaths);
+      String dswLoadingFailure = DomainSocketWatcher.getLoadingFailureReason();
+      if (dswLoadingFailure != null) {
+        throw new IOException(dswLoadingFailure);
       }
-      shmFactory = 
-          new SharedFileDescriptorFactory("HadoopShortCircuitShm_", shmPath);
       watcher = new DomainSocketWatcher(interruptCheck);
       enabled = true;
       if (LOG.isDebugEnabled()) {
         LOG.debug("created new ShortCircuitRegistry with interruptCheck=" +
-                  interruptCheck + ", shmPath=" + shmPath);
+                  interruptCheck + ", shmPath=" + shmFactory.getPath());
+      }
+    } catch (IOException e) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Disabling ShortCircuitRegistry", e);
       }
     } finally {
       this.enabled = enabled;
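
The SharedFileDescriptorFactory.create(prefix, paths) call above is what implements the
"fall back when needed" behavior; its body lives in hadoop-common and is not part of this
hunk, so the following is only a rough sketch of that fallback idea, with a hypothetical
class and method name:

    import java.io.File;
    import java.io.IOException;
    import java.util.Arrays;

    // Illustration only; not the actual SharedFileDescriptorFactory code.
    public class FallbackSketch {
      static String pickFirstUsableDir(String[] candidates) throws IOException {
        for (String dir : candidates) {
          File f = new File(dir);
          // Take the first entry that is a writable directory, e.g. /dev/shm
          // on most Linux systems, otherwise /tmp.
          if (f.isDirectory() && f.canWrite()) {
            return dir;
          }
        }
        throw new IOException("no usable shared file descriptor path among "
            + Arrays.toString(candidates));
      }
    }

As in the patched constructor, a failure here would surface as an IOException that merely
disables the ShortCircuitRegistry instead of aborting DataNode startup.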

Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml?rev=1574799&r1=1574798&r2=1574799&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml Thu Mar  6 08:27:55 2014
@@ -1138,13 +1138,13 @@
 </property>
 
 <property>
-  <name>dfs.datanode.shared.file.descriptor.path</name>
-  <value>/dev/shm</value>
+  <name>dfs.datanode.shared.file.descriptor.paths</name>
+  <value>/dev/shm,/tmp</value>
   <description>
-    The path to use when creating file descriptors that will be shared
-    between the DataNode and the DFSClient.  Typically we use /dev/shm, so
-    that the file descriptors will not be written to disk.  Systems that
-    don't have /dev/shm should use /tmp.
+    A comma-separated list of paths to use when creating file descriptors that
+    will be shared between the DataNode and the DFSClient.  Typically we use
+    /dev/shm, so that the file descriptors will not be written to disk.
+    Systems that don't have /dev/shm will fall back to /tmp by default.
   </description>
 </property>
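
The new default means that when the property is left unset the DataNode behaves as if
"/dev/shm,/tmp" had been configured. A small illustration of that default handling via
Configuration.getTrimmedStrings (class name hypothetical):

    import java.util.Arrays;
    import org.apache.hadoop.conf.Configuration;

    public class ShmDefaultExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Unset property: fall back to the documented default list, so
        // /dev/shm is preferred and /tmp is the second choice.
        String[] dirs = conf.getTrimmedStrings(
            "dfs.datanode.shared.file.descriptor.paths", "/dev/shm", "/tmp");
        System.out.println(Arrays.toString(dirs));  // prints [/dev/shm, /tmp]
      }
    }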
 

Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/TestShortCircuitShm.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/TestShortCircuitShm.java?rev=1574799&r1=1574798&r2=1574799&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/TestShortCircuitShm.java (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/TestShortCircuitShm.java Thu Mar  6 08:27:55 2014
@@ -22,11 +22,9 @@ import java.io.FileInputStream;
 import java.util.ArrayList;
 import java.util.Iterator;
 
-import org.apache.commons.lang.SystemUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.io.nativeio.SharedFileDescriptorFactory;
 import org.apache.hadoop.hdfs.ExtendedBlockId;
 import org.apache.hadoop.hdfs.ShortCircuitShm;
@@ -45,8 +43,8 @@ public class TestShortCircuitShm {
 
   @Before
   public void before() {
-    Assume.assumeTrue(NativeIO.isAvailable());
-    Assume.assumeTrue(SystemUtils.IS_OS_UNIX);
+    Assume.assumeTrue(null == 
+        SharedFileDescriptorFactory.getLoadingFailureReason());
   }
 
   @Test(timeout=60000)
@@ -54,7 +52,8 @@ public class TestShortCircuitShm {
     File path = new File(TEST_BASE, "testStartupShutdown");
     path.mkdirs();
     SharedFileDescriptorFactory factory =
-        new SharedFileDescriptorFactory("shm_", path.getAbsolutePath());
+        SharedFileDescriptorFactory.create("shm_",
+            new String[] { path.getAbsolutePath() } );
     FileInputStream stream =
         factory.createDescriptor("testStartupShutdown", 4096);
     ShortCircuitShm shm = new ShortCircuitShm(ShmId.createRandom(), stream);
@@ -68,7 +67,8 @@ public class TestShortCircuitShm {
     File path = new File(TEST_BASE, "testAllocateSlots");
     path.mkdirs();
     SharedFileDescriptorFactory factory =
-        new SharedFileDescriptorFactory("shm_", path.getAbsolutePath());
+        SharedFileDescriptorFactory.create("shm_", 
+            new String[] { path.getAbsolutePath() });
     FileInputStream stream =
         factory.createDescriptor("testAllocateSlots", 4096);
     ShortCircuitShm shm = new ShortCircuitShm(ShmId.createRandom(), stream);

