YARN-5280. Allow YARN containers to run with Java Security Manager (gphillips via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6f6dfe02
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6f6dfe02
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6f6dfe02

Branch: refs/heads/YARN-5972
Commit: 6f6dfe0202249c129b36edfd145a2224140139cc
Parents: 899d5c4
Author: Robert Kanter <rkan...@apache.org>
Authored: Wed Mar 1 14:53:47 2017 -0800
Committer: Robert Kanter <rkan...@apache.org>
Committed: Wed Mar 1 14:53:47 2017 -0800

----------------------------------------------------------------------
 .../hadoop/yarn/conf/YarnConfiguration.java     |  20 +
 .../src/main/resources/yarn-default.xml         |  26 +
 .../server/nodemanager/ContainerExecutor.java   |   9 +
 .../nodemanager/LinuxContainerExecutor.java     |  23 +
 .../launcher/ContainerLaunch.java               |  34 +-
 .../DelegatingLinuxContainerRuntime.java        |  30 +-
 .../JavaSandboxLinuxContainerRuntime.java       | 495 +++++++++++++++++++
 .../runtime/LinuxContainerRuntimeConstants.java |   2 +
 .../executor/ContainerPrepareContext.java       | 119 +++++
 .../src/main/resources/java.policy              |  63 +++
 .../TestJavaSandboxLinuxContainerRuntime.java   | 364 ++++++++++++++
 11 files changed, 1155 insertions(+), 30 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f6dfe02/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index cdccec6..b366855 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1465,6 +1465,26 @@ public class YarnConfiguration extends Configuration {
   public static final String DEFAULT_NM_DOCKER_DEFAULT_CONTAINER_NETWORK =
       "host";
 
+  /** The mode in which the Java Container Sandbox should run, as described
+   *  by the JavaSandboxLinuxContainerRuntime. */
+  public static final String YARN_CONTAINER_SANDBOX =
+      LINUX_CONTAINER_RUNTIME_PREFIX + "sandbox-mode";
+  public static final String DEFAULT_YARN_CONTAINER_SANDBOX = "disabled";
+
+  /** Permissions for application local directories.*/
+  public static final String YARN_CONTAINER_SANDBOX_FILE_PERMISSIONS =
+      YARN_CONTAINER_SANDBOX + ".local-dirs.permissions";
+  public static final String DEFAULT_YARN_CONTAINER_SANDBOX_FILE_PERMISSIONS =
+      "read";
+
+  /** Location for non-default java policy file.*/
+  public static final String YARN_CONTAINER_SANDBOX_POLICY =
+      YARN_CONTAINER_SANDBOX + ".policy";
+
+  /** The group which will run by default without the java security manager.*/
+  public static final String YARN_CONTAINER_SANDBOX_WHITELIST_GROUP =
+      YARN_CONTAINER_SANDBOX + ".whitelist-group";
+
   /** The path to the Linux container executor.*/
   public static final String NM_LINUX_CONTAINER_EXECUTOR_PATH =
     NM_PREFIX + "linux-container-executor.path";
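
For reference, a minimal sketch (not part of this patch) of how the new keys
might be set programmatically; the group name "sandbox-exempt" is a
placeholder:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    public class SandboxConfigSketch {
      public static void main(String[] args) {
        Configuration conf = new YarnConfiguration();
        // Run JVM containers under the Java Security Manager; non-JVM
        // containers still launch normally in permissive mode.
        conf.set(YarnConfiguration.YARN_CONTAINER_SANDBOX, "permissive");
        // Permission granted on application local directories (default "read").
        conf.set(YarnConfiguration.YARN_CONTAINER_SANDBOX_FILE_PERMISSIONS,
            "read");
        // Members of this placeholder group bypass the sandbox.
        conf.set(YarnConfiguration.YARN_CONTAINER_SANDBOX_WHITELIST_GROUP,
            "sandbox-exempt");
      }
    }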

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f6dfe02/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 368946e..645a342 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -1584,6 +1584,32 @@
   </property>
 
   <property>
+    <description>The mode in which the Java Container Sandbox should run, as
+      described by the JavaSandboxLinuxContainerRuntime.</description>
+    <name>yarn.nodemanager.runtime.linux.sandbox-mode</name>
+    <value>disabled</value>
+  </property>
+
+  <property>
+    <description>Permissions for application local directories.</description>
+    <name>yarn.nodemanager.runtime.linux.sandbox-mode.local-dirs.permissions</name>
+    <value>read</value>
+  </property>
+
+  <property>
+    <description>Location for non-default java policy file.</description>
+    <name>yarn.nodemanager.runtime.linux.sandbox-mode.policy</name>
+    <value></value>
+  </property>
+
+  <property>
+    <description>The group which will run by default without the java security
+      manager.</description>
+    <name>yarn.nodemanager.runtime.linux.sandbox-mode.whitelist-group</name>
+    <value></value>
+  </property>
+
+  <property>
    <description>This flag determines whether memory limit will be set for the Windows Job
    Object of the containers launched by the default container executor.</description>
     <name>yarn.nodemanager.windows-container.memory-limit.enabled</name>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f6dfe02/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
index 158585e..64d2af9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
@@ -50,6 +50,7 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerDiagnosticsUpdateEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch;
+import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerPrepareContext;
 import org.apache.hadoop.yarn.server.nodemanager.util.NodeManagerHardwareUtils;
 import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerLivenessContext;
 import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerReacquisitionContext;
@@ -148,6 +149,14 @@ public abstract class ContainerExecutor implements Configurable {
   public abstract void startLocalizer(LocalizerStartContext ctx)
     throws IOException, InterruptedException;
 
+  /**
+   * Prepare the container prior to the launch environment being written.
+   * @param ctx Encapsulates information necessary for launching containers.
+   * @throws IOException if errors occur during container preparation
+   */
+  public void prepareContainer(ContainerPrepareContext ctx) throws
+      IOException{
+  }
 
   /**
    * Launch the container on the node. This is a blocking call and returns only

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f6dfe02/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
index 323c443..46b6dfb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
@@ -46,6 +46,7 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.Cont
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntimeContext;
 import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerLivenessContext;
+import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerPrepareContext;
 import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerReacquisitionContext;
 import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerSignalContext;
 import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerStartContext;
@@ -363,6 +364,28 @@ public class LinuxContainerExecutor extends ContainerExecutor {
   }
 
   @Override
+  public void prepareContainer(ContainerPrepareContext ctx) throws IOException {
+
+    ContainerRuntimeContext.Builder builder =
+        new ContainerRuntimeContext.Builder(ctx.getContainer());
+
+    builder.setExecutionAttribute(LOCALIZED_RESOURCES,
+            ctx.getLocalizedResources())
+        .setExecutionAttribute(USER, ctx.getUser())
+        .setExecutionAttribute(CONTAINER_LOCAL_DIRS,
+            ctx.getContainerLocalDirs())
+        .setExecutionAttribute(CONTAINER_RUN_CMDS, ctx.getCommands())
+        .setExecutionAttribute(CONTAINER_ID_STR,
+            ctx.getContainer().getContainerId().toString());
+
+    try {
+      linuxContainerRuntime.prepareContainer(builder.build());
+    } catch (ContainerExecutionException e) {
+      throw new IOException("Unable to prepare container: ", e);
+    }
+  }
+
+  @Override
   public int launchContainer(ContainerStartContext ctx) throws IOException {
     Container container = ctx.getContainer();
     Path nmPrivateContainerScriptPath = ctx.getNmPrivateContainerScriptPath();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f6dfe02/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
index 823457f..a1c407f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
@@ -77,6 +77,7 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Cont
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService;
+import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerPrepareContext;
 import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerSignalContext;
 import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerStartContext;
 import org.apache.hadoop.yarn.server.nodemanager.util.ProcessIdFileReader;
@@ -198,18 +199,14 @@ public class ContainerLaunch implements Callable<Integer> {
 
       FileContext lfs = FileContext.getLocalFSFileContext();
 
-      Path nmPrivateContainerScriptPath =
-          dirsHandler.getLocalPathForWrite(
+      Path nmPrivateContainerScriptPath = dirsHandler.getLocalPathForWrite(
               getContainerPrivateDir(appIdStr, containerIdStr) + Path.SEPARATOR
                   + CONTAINER_SCRIPT);
-      Path nmPrivateTokensPath =
-          dirsHandler.getLocalPathForWrite(
-              getContainerPrivateDir(appIdStr, containerIdStr)
-                  + Path.SEPARATOR
+      Path nmPrivateTokensPath = dirsHandler.getLocalPathForWrite(
+              getContainerPrivateDir(appIdStr, containerIdStr) + Path.SEPARATOR
                   + String.format(ContainerLocalizer.TOKEN_FILE_NAME_FMT,
                       containerIdStr));
-      Path nmPrivateClasspathJarDir = 
-          dirsHandler.getLocalPathForWrite(
+      Path nmPrivateClasspathJarDir = dirsHandler.getLocalPathForWrite(
               getContainerPrivateDir(appIdStr, containerIdStr));
       DataOutputStream containerScriptOutStream = null;
       DataOutputStream tokensOutStream = null;
@@ -224,7 +221,6 @@ public class ContainerLaunch implements Callable<Integer> {
       recordContainerWorkDir(containerID, containerWorkDir.toString());
 
       String pidFileSubpath = getPidFileSubpath(appIdStr, containerIdStr);
-
       // pid file should be in nm private dir so that it is not 
       // accessible by users
       pidFilePath = dirsHandler.getLocalPathForWrite(pidFileSubpath);
@@ -240,11 +236,9 @@ public class ContainerLaunch implements Callable<Integer> {
         throw new IOException("Most of the disks failed. "
             + dirsHandler.getDisksHealthReport(false));
       }
-
       try {
         // /////////// Write out the container-script in the nmPrivate space.
         List<Path> appDirs = new ArrayList<Path>(localDirs.size());
-
         for (String localDir : localDirs) {
           Path usersdir = new Path(localDir, ContainerLocalizer.USERCACHE);
           Path userdir = new Path(usersdir, user);
@@ -258,18 +252,22 @@ public class ContainerLaunch implements Callable<Integer> {
         // Set the token location too.
         environment.put(
             ApplicationConstants.CONTAINER_TOKEN_FILE_ENV_NAME, 
-            new Path(containerWorkDir, 
+            new Path(containerWorkDir,
                 FINAL_CONTAINER_TOKENS_FILE).toUri().getPath());
         // Sanitize the container's environment
         sanitizeEnv(environment, containerWorkDir, appDirs, userLocalDirs,
-            containerLogDirs,
-          localResources, nmPrivateClasspathJarDir);
-
+            containerLogDirs, localResources, nmPrivateClasspathJarDir);
+
+        exec.prepareContainer(new ContainerPrepareContext.Builder()
+            .setContainer(container)
+            .setLocalizedResources(localResources)
+            .setUser(user)
+            .setContainerLocalDirs(containerLocalDirs)
+            .setCommands(launchContext.getCommands()).build());
         // Write out the environment
         exec.writeLaunchEnv(containerScriptOutStream, environment,
           localResources, launchContext.getCommands(),
             new Path(containerLogDirs.get(0)), user);
-
         // /////////// End of writing out container-script
 
         // /////////// Write out the container-tokens in the nmPrivate space.
@@ -295,8 +293,7 @@ public class ContainerLaunch implements Callable<Integer> {
           .setFilecacheDirs(filecacheDirs)
           .setUserLocalDirs(userLocalDirs)
           .setContainerLocalDirs(containerLocalDirs)
-          .setContainerLogDirs(containerLogDirs)
-          .build());
+          .setContainerLogDirs(containerLogDirs).build());
     } catch (Throwable e) {
       LOG.warn("Failed to launch container.", e);
       dispatcher.getEventHandler().handle(new ContainerExitEvent(
@@ -308,7 +305,6 @@ public class ContainerLaunch implements Callable<Integer> {
     }
 
     handleContainerExitCode(ret, containerLogDir);
-
     return ret;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f6dfe02/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DelegatingLinuxContainerRuntime.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DelegatingLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DelegatingLinuxContainerRuntime.java
index c12858e..896ffd0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DelegatingLinuxContainerRuntime.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DelegatingLinuxContainerRuntime.java
@@ -35,10 +35,10 @@ import java.util.Map;
 
 /**
  * This class is a {@link ContainerRuntime} implementation that delegates all
- * operations to either a {@link DefaultLinuxContainerRuntime} instance or a
- * {@link DockerLinuxContainerRuntime} instance, depending on whether the
- * {@link DockerLinuxContainerRuntime} instance believes the operation to be
- * requesting a Docker container.
+ * operations to a {@link DefaultLinuxContainerRuntime} instance, a
+ * {@link DockerLinuxContainerRuntime} instance, or a
+ * {@link JavaSandboxLinuxContainerRuntime} instance depending on whether
+ * each instance believes the operation to be within its scope.
  *
  * @see DockerLinuxContainerRuntime#isDockerContainerRequested
  */
@@ -49,6 +49,7 @@ public class DelegatingLinuxContainerRuntime implements LinuxContainerRuntime {
       .getLog(DelegatingLinuxContainerRuntime.class);
   private DefaultLinuxContainerRuntime defaultLinuxContainerRuntime;
   private DockerLinuxContainerRuntime dockerLinuxContainerRuntime;
+  private JavaSandboxLinuxContainerRuntime javaSandboxLinuxContainerRuntime;
 
   @Override
   public void initialize(Configuration conf)
@@ -61,15 +62,20 @@ public class DelegatingLinuxContainerRuntime implements LinuxContainerRuntime {
     dockerLinuxContainerRuntime = new DockerLinuxContainerRuntime(
         privilegedOperationExecutor);
     dockerLinuxContainerRuntime.initialize(conf);
+    javaSandboxLinuxContainerRuntime = new JavaSandboxLinuxContainerRuntime(
+        privilegedOperationExecutor);
+    javaSandboxLinuxContainerRuntime.initialize(conf);
   }
 
-  private LinuxContainerRuntime pickContainerRuntime(Container container) {
-    Map<String, String> env = container.getLaunchContext().getEnvironment();
+  private LinuxContainerRuntime pickContainerRuntime(
+      Map<String, String> environment){
     LinuxContainerRuntime runtime;
 
-    if (DockerLinuxContainerRuntime.isDockerContainerRequested(env)){
+    if (DockerLinuxContainerRuntime.isDockerContainerRequested(environment)){
       runtime = dockerLinuxContainerRuntime;
-    } else  {
+    } else if (javaSandboxLinuxContainerRuntime.isSandboxContainerRequested()) {
+      runtime = javaSandboxLinuxContainerRuntime;
+    } else {
       runtime = defaultLinuxContainerRuntime;
     }
 
@@ -81,12 +87,14 @@ public class DelegatingLinuxContainerRuntime implements LinuxContainerRuntime {
     return runtime;
   }
 
+  private LinuxContainerRuntime pickContainerRuntime(Container container) {
+    return pickContainerRuntime(container.getLaunchContext().getEnvironment());
+  }
+
   @Override
   public void prepareContainer(ContainerRuntimeContext ctx)
       throws ContainerExecutionException {
-    Container container = ctx.getContainer();
-    LinuxContainerRuntime runtime = pickContainerRuntime(container);
-
+    LinuxContainerRuntime runtime = pickContainerRuntime(ctx.getContainer());
     runtime.prepareContainer(ctx);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f6dfe02/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/JavaSandboxLinuxContainerRuntime.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/JavaSandboxLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/JavaSandboxLinuxContainerRuntime.java
new file mode 100644
index 0000000..6dc627b
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/JavaSandboxLinuxContainerRuntime.java
@@ -0,0 +1,495 @@
+/*
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.nodemanager.
+    containermanager.linux.runtime;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.security.Groups;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationExecutor;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntimeContext;
+import org.apache.log4j.Logger;
+
+import java.io.FilePermission;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.DirectoryStream;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.nio.file.attribute.FileAttribute;
+import java.nio.file.attribute.PosixFilePermission;
+import java.nio.file.attribute.PosixFilePermissions;
+import java.security.AllPermission;
+import java.util.Formatter;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import static org.apache.hadoop.fs.Path.SEPARATOR;
+import static org.apache.hadoop.util.Shell.SYSPROP_HADOOP_HOME_DIR;
+import static org.apache.hadoop.yarn.api.ApplicationConstants.Environment.JAVA_HOME;
+import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.CONTAINER_ID_STR;
+import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.CONTAINER_LOCAL_DIRS;
+import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.CONTAINER_RUN_CMDS;
+import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.LOCALIZED_RESOURCES;
+import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.USER;
+/**
+ * <p>This class extends the {@link DefaultLinuxContainerRuntime} specifically
+ * for containers which run Java commands.  It generates a new java security
+ * policy file per container and modifies the java command to enable the
+ * Java Security Manager with the generated policy.</p>
+ *
+ * The behavior of the {@link JavaSandboxLinuxContainerRuntime} can be modified
+ * using the following settings:
+ *
+ * <ul>
+ *   <li>
+ *     {@value YarnConfiguration#YARN_CONTAINER_SANDBOX} :
+ *     This yarn-site.xml setting has three options:
+ *     <ul>
+ *     <li>disabled - Default behavior. The
+ *     {@link JavaSandboxLinuxContainerRuntime} is disabled.</li>
+ *     <li>permissive - JVM containers will run with the Java Security Manager
+ *     enabled.  Non-JVM containers will run normally.</li>
+ *     <li>enforcing - JVM containers will run with the Java Security Manager
+ *     enabled.  Non-JVM containers will be prevented from executing and a
+ *     {@link ContainerExecutionException} will be thrown.</li>
+ *     </ul>
+ *   </li>
+ *   <li>
+ *     {@value YarnConfiguration#YARN_CONTAINER_SANDBOX_FILE_PERMISSIONS} :
+ *     Determines the file permissions for the application directories.  The
+ *     permissions come in the form of comma separated values
+ *     (e.g. read,write,execute,delete). Defaults to {@code read} for read-only.
+ *   </li>
+ *   <li>
+ *     {@value YarnConfiguration#YARN_CONTAINER_SANDBOX_POLICY} :
+ *     Accepts canonical path to a java policy file on the local filesystem.
+ *     This file will be loaded as the base policy, any additional container
+ *     grants will be appended to this base file.  If not specified, the default
+ *     java.policy file provided with hadoop resources will be used.
+ *   </li>
+ *   <li>
+ *     {@value YarnConfiguration#YARN_CONTAINER_SANDBOX_WHITELIST_GROUP} :
+ *     Optional setting to specify a group whose members will be exempt from
+ *     the sandboxing process.
+ *   </li>
+ * </ul>
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class JavaSandboxLinuxContainerRuntime
+    extends DefaultLinuxContainerRuntime {
+  private static final Log LOG =
+      LogFactory.getLog(JavaSandboxLinuxContainerRuntime.class);
+  private Configuration configuration;
+  private SandboxMode sandboxMode;
+
+  public static final String POLICY_FILE_DIR = "nm-sandbox-policies";
+
+  private static Path policyFileDir;
+  private static final FileAttribute<Set<PosixFilePermission>> POLICY_ATTR =
+      PosixFilePermissions.asFileAttribute(
+          PosixFilePermissions.fromString("rwxr-xr-x"));
+
+  private Map<String, Path> containerPolicies = new HashMap<>();
+
+  /**
+   * Create an instance using the given {@link PrivilegedOperationExecutor}
+   * instance for performing operations.
+   *
+   * @param privilegedOperationExecutor the {@link PrivilegedOperationExecutor}
+   * instance
+   */
+  public JavaSandboxLinuxContainerRuntime(
+      PrivilegedOperationExecutor privilegedOperationExecutor) {
+    super(privilegedOperationExecutor);
+  }
+
+  @Override
+  public void initialize(Configuration conf)
+      throws ContainerExecutionException {
+    this.configuration = conf;
+    this.sandboxMode =
+        SandboxMode.get(
+            this.configuration.get(YarnConfiguration.YARN_CONTAINER_SANDBOX,
+                YarnConfiguration.DEFAULT_YARN_CONTAINER_SANDBOX));
+
+    initializePolicyDir();
+
+    super.initialize(conf);
+  }
+
+  /**
+   * Initialize the Java Security Policy directory.  Either creates the
+   * directory if it doesn't exist, or clears the contents of the directory if
+   * already created.
+   * @throws ContainerExecutionException If unable to resolve policy directory
+   */
+  private void initializePolicyDir() throws ContainerExecutionException {
+    String hadoopTempDir = configuration.get("hadoop.tmp.dir");
+    if (hadoopTempDir == null) {
+      throw new ContainerExecutionException("hadoop.tmp.dir not set!");
+    }
+    policyFileDir = Paths.get(hadoopTempDir, POLICY_FILE_DIR);
+    //Delete any existing policy files if the directory has already been created
+    if(Files.exists(policyFileDir)){
+      try (DirectoryStream<Path> stream =
+         Files.newDirectoryStream(policyFileDir)){
+        for(Path policyFile : stream){
+          Files.delete(policyFile);
+        }
+      }catch(IOException e){
+        throw new ContainerExecutionException("Unable to initialize policy "
+            + "directory: " + e);
+      }
+    } else {
+      try {
+        policyFileDir = Files.createDirectories(
+            Paths.get(hadoopTempDir, POLICY_FILE_DIR), POLICY_ATTR);
+      } catch (IOException e) {
+        throw new ContainerExecutionException("Unable to create policy file " +
+            "directory: " + e);
+      }
+    }
+  }
+
+  /**
+   *  Prior to the container environment being written locally, we need to
+   *  generate a policy file which limits container access to a small set of
+   *  directories.
+   *  Additionally the container run command needs to be modified to include
+   *  flags to enable the java security manager with the generated policy.
+   *  <br>
+   *  The Java Sandbox will be circumvented if the user is a member of the
+   *  group specified in:
+   *  {@value YarnConfiguration#YARN_CONTAINER_SANDBOX_WHITELIST_GROUP} and if
+   *  they do not include the JVM flag:
+   *  {@value NMContainerPolicyUtils#SECURITY_FLAG}
+   *
+   * @param ctx The {@link ContainerRuntimeContext} containing container
+   *            setup properties.
+   * @throws ContainerExecutionException Exception thrown if temporary policy
+   * file directory can't be created, or if any exceptions occur during policy
+   * file parsing and generation.
+   */
+  @Override
+  public void prepareContainer(ContainerRuntimeContext ctx)
+      throws ContainerExecutionException {
+
+    @SuppressWarnings("unchecked")
+    List<String> localDirs =
+        ctx.getExecutionAttribute(CONTAINER_LOCAL_DIRS);
+    @SuppressWarnings("unchecked")
+    Map<org.apache.hadoop.fs.Path, List<String>> resources =
+        ctx.getExecutionAttribute(LOCALIZED_RESOURCES);
+    @SuppressWarnings("unchecked")
+    List<String> commands =
+        ctx.getExecutionAttribute(CONTAINER_RUN_CMDS);
+    Map<String, String> env =
+        ctx.getContainer().getLaunchContext().getEnvironment();
+
+    if(!isSandboxContainerWhitelisted(ctx, commands)) {
+      String tmpDirBase = configuration.get("hadoop.tmp.dir");
+      if (tmpDirBase == null) {
+        throw new ContainerExecutionException("hadoop.tmp.dir not set!");
+      }
+
+      OutputStream policyOutputStream = null;
+      try {
+        String containerID = ctx.getExecutionAttribute(CONTAINER_ID_STR);
+
+        Path policyFilePath = Files.createFile(
+            Paths.get(policyFileDir.toString(),
+            containerID + "-" + NMContainerPolicyUtils.POLICY_FILE),
+            POLICY_ATTR);
+        policyOutputStream = Files.newOutputStream(policyFilePath);
+
+        containerPolicies.put(containerID, policyFilePath);
+
+        NMContainerPolicyUtils.generatePolicyFile(
+            policyOutputStream, localDirs, resources, configuration);
+        NMContainerPolicyUtils.appendSecurityFlags(
+            commands, env, policyFilePath, sandboxMode);
+
+      } catch (Exception e) {
+        throw new ContainerExecutionException(e);
+      } finally {
+        IOUtils.cleanup(LOG, policyOutputStream);
+      }
+    }
+  }
+
+  @Override
+  public void launchContainer(ContainerRuntimeContext ctx)
+      throws ContainerExecutionException {
+    try {
+      super.launchContainer(ctx);
+    } finally {
+      deletePolicyFiles(ctx);
+    }
+  }
+
+  /**
+   * Determine if the JavaSandboxLinuxContainerRuntime should be used.  This is
+   * decided based on the value of
+   * {@value YarnConfiguration#YARN_CONTAINER_SANDBOX}
+   * @return true if Sandbox is requested, false otherwise
+   */
+  boolean isSandboxContainerRequested() {
+    return sandboxMode != SandboxMode.disabled;
+  }
+
+  /**
+   * Determine if the container should be whitelisted (i.e. exempt from the
+   * Java Security Manager).
+   * @param ctx The container runtime context for the requested container
+   * @param commands The list of run commands for the container
+   * @return boolean value denoting whether the container should be whitelisted.
+   * @throws ContainerExecutionException If container user can not be resolved
+   */
+  private boolean isSandboxContainerWhitelisted(ContainerRuntimeContext ctx,
+      List<String> commands) throws ContainerExecutionException {
+    String whitelistGroup = configuration.get(
+        YarnConfiguration.YARN_CONTAINER_SANDBOX_WHITELIST_GROUP);
+    Groups groups = Groups.getUserToGroupsMappingService(configuration);
+    List<String> userGroups;
+    boolean isWhitelisted = false;
+
+    try {
+      userGroups = groups.getGroups(ctx.getExecutionAttribute(USER));
+    } catch (IOException e) {
+      throw new ContainerExecutionException("Container user does not exist");
+    }
+
+    if(whitelistGroup != null && userGroups.contains(whitelistGroup)) {
+      // If any command has security flag, whitelisting is disabled
+      for(String cmd : commands) {
+        if(cmd.contains(NMContainerPolicyUtils.SECURITY_FLAG)){
+          isWhitelisted = false;
+          break;
+        } else {
+          isWhitelisted = true;
+        }
+      }
+    }
+    return isWhitelisted;
+  }
+
+  /**
+   * Deletes policy files for container specified by parameter.  Additionally
+   * this method will age off any stale policy files generated by
+   * {@link JavaSandboxLinuxContainerRuntime}
+   * @param ctx Container context for files to be deleted
+   * @throws ContainerExecutionException if unable to access or delete policy
+   * files or generated policy file directory
+   */
+  private void deletePolicyFiles(ContainerRuntimeContext ctx)
+      throws ContainerExecutionException {
+    try {
+      Files.delete(containerPolicies.remove(
+          ctx.getExecutionAttribute(CONTAINER_ID_STR)));
+    } catch (IOException e) {
+      throw new ContainerExecutionException("Unable to delete policy file: "
+          + e);
+    }
+  }
+
+  /**
+   * Enumeration of the modes the JavaSandboxLinuxContainerRuntime can use.
+   * See {@link JavaSandboxLinuxContainerRuntime} for details on the
+   * behavior of each setting.
+   */
+  public enum SandboxMode {
+    enforcing("enforcing"),
+    permissive("permissive"),
+    disabled("disabled");
+
+    private final String mode;
+    SandboxMode(String mode){
+      this.mode = mode;
+    }
+
+    public static SandboxMode get(String mode) {
+
+      if(enforcing.mode.equals(mode)) {
+        return enforcing;
+      } else if(permissive.mode.equals(mode)) {
+        return permissive;
+      } else {
+        return disabled;
+      }
+    }
+
+    public String toString(){
+      return mode;
+    }
+  }
+
+  /**
+   * Static utility class defining String constants and static methods for the
+   * use of the {@link JavaSandboxLinuxContainerRuntime}.
+   */
+  static final class NMContainerPolicyUtils{
+
+    static final String POLICY_FILE = "java.policy";
+    static final String SECURITY_DEBUG = " -Djava.security.debug=all";
+    static final String SECURITY_FLAG = "-Djava.security.manager";
+    static final String POLICY_APPEND_FLAG = "-Djava.security.policy=";
+    static final String POLICY_FLAG = POLICY_APPEND_FLAG + "=";
+    static final String JAVA_CMD = "/bin/java ";
+    static final String JVM_SECURITY_CMD =
+        JAVA_CMD + SECURITY_FLAG + " " + POLICY_FLAG;
+
+    static final String STRIP_POLICY_FLAG = POLICY_APPEND_FLAG + "[^ ]+";
+    static final String CONTAINS_JAVA_CMD = "\\$" + JAVA_HOME + JAVA_CMD + ".*";
+    static final String CHAINED_COMMAND_REGEX =
+        "^.*(&&.+$)|(\\|\\|.+$).*$";  //Matches any occurrences of '||' or '&&'
+    static final String CLEAN_CMD_REGEX =
+        "(" + SECURITY_FLAG + ")|" +
+            "(" + STRIP_POLICY_FLAG + ")";
+
+    static final String FILE_PERMISSION_FORMAT = "   permission "
+        + FilePermission.class.getCanonicalName()
+        + " \"%1$s" + SEPARATOR + "-\", \"%2$s\";%n";
+    static final String HADOOP_HOME_PERMISSION = "%ngrant codeBase \"file:"
+        + Paths.get(System.getProperty(SYSPROP_HADOOP_HOME_DIR))
+        + SEPARATOR + "-\" {%n" +
+        "  permission " + AllPermission.class.getCanonicalName() + ";%n};%n";
+    static final Logger LOG =
+        Logger.getLogger(NMContainerPolicyUtils.class);
+
+    /**
+     * Write new policy file to policyOutStream which will include read access
+     * to localized resources.  Optionally a default policyFilePath can be
+     * specified to append a custom policy implementation to the new policy
+     * file.
+     * @param policyOutStream OutputStream pointing to java.policy file
+     * @param localDirs Container local directories
+     * @param resources List of local container resources
+     * @param conf YARN configuration
+     * @throws IOException - If policy file generation is unable to read the
+     * base policy file or if it is unable to create a new policy file.
+     */
+    static void generatePolicyFile(OutputStream policyOutStream,
+        List<String> localDirs, Map<org.apache.hadoop.fs.Path,
+        List<String>> resources, Configuration conf)
+        throws IOException {
+
+      String policyFilePath =
+          conf.get(YarnConfiguration.YARN_CONTAINER_SANDBOX_POLICY);
+      String filePermissions =
+          conf.get(YarnConfiguration.YARN_CONTAINER_SANDBOX_FILE_PERMISSIONS,
+            YarnConfiguration.DEFAULT_YARN_CONTAINER_SANDBOX_FILE_PERMISSIONS);
+
+      Set<String> cacheDirs = new HashSet<>();
+      for(org.apache.hadoop.fs.Path path : resources.keySet()) {
+        cacheDirs.add(path.getParent().toString());
+      }
+
+      if(policyFilePath == null) {
+        IOUtils.copyBytes(
+            NMContainerPolicyUtils.class.getResourceAsStream("/" + POLICY_FILE),
+            policyOutStream, conf, false);
+      } else {
+        Files.copy(Paths.get(policyFilePath), policyOutStream);
+        policyOutStream.flush();
+      }
+
+      Formatter filePermissionFormat = new Formatter(policyOutStream,
+          StandardCharsets.UTF_8.name());
+      filePermissionFormat.format(HADOOP_HOME_PERMISSION);
+      filePermissionFormat.format("grant {%n");
+      for(String localDir : localDirs) {
+        filePermissionFormat.format(
+            FILE_PERMISSION_FORMAT, localDir, filePermissions);
+      }
+      for(String cacheDir : cacheDirs) {
+        filePermissionFormat.format(
+            FILE_PERMISSION_FORMAT, cacheDir, filePermissions);
+      }
+      filePermissionFormat.format("};%n");
+      filePermissionFormat.flush();
+    }
+
+    /**
+     * Modify command to enable the Java Security Manager and specify
+     * java.policy file.  Will modify the passed commands to strip any
+     * existing java security configurations.  Expects a java command to be the
+     * first and only executable provided in enforcing mode.  In permissive mode
+     * any commands with '||' or '&&' will not be modified.
+     * @param commands List of container commands
+     * @param env Container environment variables
+     * @param policyPath Path to the container specific policy file
+     * @param sandboxMode (enforcing, permissive, disabled) Determines
+     *          whether non-java containers will be launched
+     * @throws ContainerExecutionException - Exception thrown if
+     * JVM Sandbox enabled in 'enforcing' mode and a non-java command is
+     * provided in the list of commands
+     */
+    static void appendSecurityFlags(List<String> commands,
+        Map<String, String> env, Path policyPath, SandboxMode sandboxMode)
+        throws ContainerExecutionException {
+
+      for(int i = 0; i < commands.size(); i++){
+        String command = commands.get(i);
+        if(validateJavaHome(env.get(JAVA_HOME.name()))
+            && command.matches(CONTAINS_JAVA_CMD)
+            && !command.matches(CHAINED_COMMAND_REGEX)){
+          command = command.replaceAll(CLEAN_CMD_REGEX, "");
+          String securityString = JVM_SECURITY_CMD + policyPath + " ";
+          if(LOG.isDebugEnabled()) {
+            securityString += SECURITY_DEBUG;
+          }
+          commands.set(i, command.replaceFirst(JAVA_CMD, securityString));
+        } else if (sandboxMode == SandboxMode.enforcing){
+          throw new ContainerExecutionException(
+              "Only JVM containers permitted in YARN sandbox mode (enforcing). "
+            + "The following command can not be executed securely: " + command);
+        } else if (sandboxMode == SandboxMode.permissive){
+          LOG.warn("The container will run without the java security manager"
+              + " due to an unsupported container command.  The command"
+              + " will be permitted to run in Sandbox permissive mode: "
+              + command);
+        }
+      }
+    }
+
+    private static boolean validateJavaHome(String containerJavaHome)
+        throws ContainerExecutionException{
+      if (System.getenv(JAVA_HOME.name()) == null){
+        throw new ContainerExecutionException(
+            "JAVA_HOME is not set for NodeManager");
+      }
+      if (containerJavaHome == null) {
+        throw new ContainerExecutionException(
+            "JAVA_HOME is not set for container");
+      }
+      return System.getenv(JAVA_HOME.name()).equals(containerJavaHome);
+    }
+  }
+}
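
To illustrate appendSecurityFlags() above with a hypothetical container
command: an input of

    $JAVA_HOME/bin/java -Xmx256m org.example.MyApp

is rewritten to roughly

    $JAVA_HOME/bin/java -Djava.security.manager -Djava.security.policy==<generated policy> -Xmx256m org.example.MyApp

where <generated policy> is the per-container file created under
<hadoop.tmp.dir>/nm-sandbox-policies, and the double '=' tells the JVM to use
only that policy file. Commands containing '&&' or '||' are not modified; in
enforcing mode they are rejected with a ContainerExecutionException, while in
permissive mode they run without the security manager.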

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f6dfe02/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/LinuxContainerRuntimeConstants.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/LinuxContainerRuntimeConstants.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/LinuxContainerRuntimeConstants.java
index 0c1ec3e..2e632fa 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/LinuxContainerRuntimeConstants.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/LinuxContainerRuntimeConstants.java
@@ -67,6 +67,8 @@ public final class LinuxContainerRuntimeConstants {
       String.class, "resources_options");
   public static final Attribute<String> TC_COMMAND_FILE = Attribute.attribute(
       String.class, "tc_command_file");
+  public static final Attribute<List> CONTAINER_RUN_CMDS = Attribute.attribute(
+      List.class, "container_run_cmds");
   public static final Attribute<String> CGROUP_RELATIVE_PATH = Attribute
       .attribute(String.class, "cgroup_relative_path");
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f6dfe02/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/executor/ContainerPrepareContext.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/executor/ContainerPrepareContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/executor/ContainerPrepareContext.java
new file mode 100644
index 0000000..f711da2
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/executor/ContainerPrepareContext.java
@@ -0,0 +1,119 @@
+/*
+ * *
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ * /
+ */
+
+package org.apache.hadoop.yarn.server.nodemanager.executor;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Encapsulates information required for preparing containers.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public final class ContainerPrepareContext {
+  private final Container container;
+  private final Map<Path, List<String>> localizedResources;
+  private final String user;
+  private final List<String> containerLocalDirs;
+  private final List<String> commands;
+
+  /**
+   * Builder for ContainerPrepareContext.
+   */
+  public static final class Builder {
+    private Container container;
+    private Map<Path, List<String>> localizedResources;
+    private String user;
+    private List<String> containerLocalDirs;
+    private List<String> commands;
+
+    public Builder() {
+    }
+
+    public ContainerPrepareContext.Builder setContainer(Container container) {
+      this.container = container;
+      return this;
+    }
+
+    public ContainerPrepareContext.Builder setLocalizedResources(Map<Path,
+        List<String>> localizedResources) {
+      this.localizedResources = localizedResources;
+      return this;
+    }
+
+    public ContainerPrepareContext.Builder setUser(String user) {
+      this.user = user;
+      return this;
+    }
+    public ContainerPrepareContext.Builder setContainerLocalDirs(
+        List<String> containerLocalDirs) {
+      this.containerLocalDirs = containerLocalDirs;
+      return this;
+    }
+
+    public ContainerPrepareContext build() {
+      return new ContainerPrepareContext(this);
+    }
+
+    public ContainerPrepareContext.Builder setCommands(List<String> commands) {
+      this.commands = commands;
+      return this;
+    }
+  }
+
+  private ContainerPrepareContext(ContainerPrepareContext.Builder builder) {
+    this.container = builder.container;
+    this.localizedResources = builder.localizedResources;
+    this.user = builder.user;
+    this.containerLocalDirs = builder.containerLocalDirs;
+    this.commands = builder.commands;
+  }
+
+  public Container getContainer() {
+    return this.container;
+  }
+
+  public Map<Path, List<String>> getLocalizedResources() {
+    if (this.localizedResources != null) {
+      return Collections.unmodifiableMap(this.localizedResources);
+    } else {
+      return null;
+    }
+  }
+
+  public String getUser() {
+    return this.user;
+  }
+
+  public List<String> getContainerLocalDirs() {
+    return Collections.unmodifiableList(this.containerLocalDirs);
+  }
+
+  public List<String> getCommands(){
+    return this.commands;
+  }
+}
\ No newline at end of file
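
A sketch of how this builder is used, mirroring the ContainerLaunch change
earlier in this patch (container, localResources, user, containerLocalDirs and
launchContext are stand-ins for values available at launch time):

    exec.prepareContainer(new ContainerPrepareContext.Builder()
        .setContainer(container)
        .setLocalizedResources(localResources)
        .setUser(user)
        .setContainerLocalDirs(containerLocalDirs)
        .setCommands(launchContext.getCommands())
        .build());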

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f6dfe02/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/java.policy
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/java.policy b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/java.policy
new file mode 100644
index 0000000..d9320c2
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/java.policy
@@ -0,0 +1,63 @@
+/*
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+// Standard extensions get all permissions by default
+grant codeBase "file:${{java.ext.dirs}}/*" {
+    permission java.security.AllPermission;
+};
+
+// default permissions granted to all domains
+grant {
+    permission java.lang.RuntimePermission "accessDeclaredMembers";
+
+    permission java.util.PropertyPermission "java.version", "read";
+    permission java.util.PropertyPermission "java.vendor", "read";
+    permission java.util.PropertyPermission "java.vendor.url", "read";
+    permission java.util.PropertyPermission "java.class.version", "read";
+    permission java.util.PropertyPermission "os.name", "read";
+    permission java.util.PropertyPermission "os.version", "read";
+    permission java.util.PropertyPermission "os.arch", "read";
+    permission java.util.PropertyPermission "file.separator", "read";
+    permission java.util.PropertyPermission "path.separator", "read";
+    permission java.util.PropertyPermission "line.separator", "read";
+
+    permission java.util.PropertyPermission "java.specification.version", "read";
+    permission java.util.PropertyPermission "java.specification.vendor", "read";
+    permission java.util.PropertyPermission "java.specification.name", "read";
+
+    permission java.util.PropertyPermission "java.vm.specification.version", "read";
+    permission java.util.PropertyPermission "java.vm.specification.vendor", "read";
+    permission java.util.PropertyPermission "java.vm.specification.name", "read";
+    permission java.util.PropertyPermission "java.vm.version", "read";
+    permission java.util.PropertyPermission "java.vm.vendor", "read";
+    permission java.util.PropertyPermission "java.vm.name", "read";
+
+    //additional hadoop permissions
+    permission java.util.PropertyPermission "awt.Toolkit", "read";
+    permission java.util.PropertyPermission "file.encoding", "read";
+    permission java.util.PropertyPermission "file.encoding.pkg", "read";
+    permission java.util.PropertyPermission "hadoop.metrics.log.level", "read";
+    permission java.util.PropertyPermission "hadoop.root.logger", "read";
+    permission java.util.PropertyPermission "java.awt.graphicsenv" ,"read";
+    permission java.util.PropertyPermission "java.awt.printerjob", "read";
+    permission java.util.PropertyPermission "java.class.path", "read";
+    permission java.util.PropertyPermission "yarn.app.container.log.dir", "read";
+    permission java.util.PropertyPermission "yarn.app.container.log.filesize", "read";
+    permission java.lang.RuntimePermission "loadLibrary.gplcompression";
+    permission javax.security.auth.AuthPermission "getSubject";
+};
\ No newline at end of file
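
At container preparation time, generatePolicyFile() copies this base policy
and appends per-container grants; with hypothetical paths (and /opt/hadoop
standing in for hadoop.home.dir) the appended section looks roughly like:

    grant codeBase "file:/opt/hadoop/-" {
      permission java.security.AllPermission;
    };
    grant {
       permission java.io.FilePermission "/data/yarn/local/usercache/alice/appcache/application_01/container_01/-", "read";
       permission java.io.FilePermission "/data/yarn/local/usercache/alice/filecache/10/-", "read";
    };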

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f6dfe02/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestJavaSandboxLinuxContainerRuntime.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestJavaSandboxLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestJavaSandboxLinuxContainerRuntime.java
new file mode 100644
index 0000000..e482c8d
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestJavaSandboxLinuxContainerRuntime.java
@@ -0,0 +1,364 @@
+/*
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.nodemanager.
+    containermanager.linux.runtime;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationExecutor;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntimeContext;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.FilePermission;
+import java.io.OutputStream;
+import java.nio.file.Files;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.apache.hadoop.yarn.api.ApplicationConstants.Environment.JAVA_HOME;
+import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.JavaSandboxLinuxContainerRuntime.NMContainerPolicyUtils.CHAINED_COMMAND_REGEX;
+import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.JavaSandboxLinuxContainerRuntime.NMContainerPolicyUtils.CLEAN_CMD_REGEX;
+import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.JavaSandboxLinuxContainerRuntime.NMContainerPolicyUtils.CONTAINS_JAVA_CMD;
+import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.JavaSandboxLinuxContainerRuntime.NMContainerPolicyUtils.POLICY_FILE;
+import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.JavaSandboxLinuxContainerRuntime.NMContainerPolicyUtils.POLICY_FLAG;
+import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.JavaSandboxLinuxContainerRuntime.NMContainerPolicyUtils.SECURITY_FLAG;
+import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.JavaSandboxLinuxContainerRuntime.POLICY_FILE_DIR;
+import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.APPID;
+import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.CONTAINER_ID_STR;
+import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.CONTAINER_LOCAL_DIRS;
+import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.CONTAINER_RUN_CMDS;
+import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.CONTAINER_WORK_DIR;
+import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.FILECACHE_DIRS;
+import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.LOCALIZED_RESOURCES;
+import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.LOCAL_DIRS;
+import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.LOG_DIRS;
+import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.RUN_AS_USER;
+import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.USER;
+import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.USER_LOCAL_DIRS;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+/**
+ * Test policy file generation and policy enforcement for the
+ * {@link JavaSandboxLinuxContainerRuntime}.
+ */
+public class TestJavaSandboxLinuxContainerRuntime {
+
+  private static final String HADOOP_HOME = "hadoop.home.dir";
+  private static String hadoopHomeDir = System.getProperty(HADOOP_HOME);
+
+  @Rule
+  public ExpectedException exception = ExpectedException.none();
+
+  private static File grantFile, denyFile, policyFile,
+          grantDir, denyDir, containerDir;
+  private static java.nio.file.Path policyFilePath;
+  private static SecurityManager securityManager;
+  private Map<Path, List<String>> resources;
+  private Map<String, String> env;
+  private List<String> whitelistGroup;
+
+  private PrivilegedOperationExecutor mockExecutor;
+  private JavaSandboxLinuxContainerRuntime runtime;
+  private ContainerRuntimeContext.Builder runtimeContextBuilder;
+  private Configuration conf;
+
+  private final static String NORMAL_USER = System.getProperty("user.name");
+  private final static String NORMAL_GROUP = "normalGroup";
+  private final static String WHITELIST_USER = "picard";
+  private final static String WHITELIST_GROUP = "captains";
+  private final static String CONTAINER_ID = "container_1234567890";
+  private final static String APPLICATION_ID = "application_1234567890";
+
+  @Before
+  public void setup() throws Exception {
+
+    File baseTestDirectory = new File(System.getProperty("test.build.data",
+        System.getProperty("java.io.tmpdir", "target")),
+        TestJavaSandboxLinuxContainerRuntime.class.getName());
+
+    whitelistGroup = new ArrayList<>();
+    whitelistGroup.add(WHITELIST_GROUP);
+
+    conf = new Configuration();
+    conf.set(CommonConfigurationKeys.HADOOP_USER_GROUP_STATIC_OVERRIDES,
+        WHITELIST_USER + "=" + WHITELIST_GROUP + ";"
+            + NORMAL_USER + "=" + NORMAL_GROUP + ";");
+    conf.set(YarnConfiguration.YARN_CONTAINER_SANDBOX_WHITELIST_GROUP,
+        WHITELIST_GROUP);
+    conf.set("hadoop.tmp.dir", baseTestDirectory.getAbsolutePath());
+
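+    // Remove any policy file left behind by a previous run so each test
+    // starts from a freshly generated policy.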
+    Files.deleteIfExists(Paths.get(baseTestDirectory.getAbsolutePath(),
+        POLICY_FILE_DIR, CONTAINER_ID + "-" + POLICY_FILE));
+
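+    // The privileged executor is mocked: these tests exercise policy
+    // generation and command rewriting only, never an actual container launch.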
+    mockExecutor = mock(PrivilegedOperationExecutor.class);
+    runtime = new JavaSandboxLinuxContainerRuntime(mockExecutor);
+    runtime.initialize(conf);
+
+    resources = new HashMap<>();
+    grantDir = new File(baseTestDirectory, "grantDir");
+    denyDir = new File(baseTestDirectory, "denyDir");
+    containerDir = new File(baseTestDirectory,
+        APPLICATION_ID + Path.SEPARATOR + CONTAINER_ID);
+    grantDir.mkdirs();
+    denyDir.mkdirs();
+    containerDir.mkdirs();
+
+    grantFile = File.createTempFile("grantFile", "tmp", grantDir);
+    denyFile = File.createTempFile("denyFile", "tmp", denyDir);
+
+    List<String> symLinks = new ArrayList<>();
+    symLinks.add(grantFile.getName());
+    resources.put(new Path(grantFile.getCanonicalPath()), symLinks);
+
+    env = new HashMap<>();
+    env.put(JAVA_HOME.name(), System.getenv(JAVA_HOME.name()));
+
+    policyFile = File.createTempFile("java", "policy", containerDir);
+    policyFilePath = Paths.get(policyFile.getAbsolutePath());
+
+    runtimeContextBuilder = createRuntimeContext();
+
+    if (hadoopHomeDir == null) {
+      System.setProperty(HADOOP_HOME, policyFile.getParent());
+    }
+
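+    // Write a policy that grants access only to the localized grantFile, then
+    // point java.security.policy at it; the SecurityManager created below is
+    // used by testGrant/testDeny to check permissions against that policy.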
+    OutputStream outStream = new FileOutputStream(policyFile);
+    JavaSandboxLinuxContainerRuntime.NMContainerPolicyUtils
+        .generatePolicyFile(outStream, symLinks, resources, conf);
+    outStream.close();
+
+    System.setProperty("java.security.policy", policyFile.getCanonicalPath());
+    securityManager = new SecurityManager();
+
+  }
+
+  public ContainerRuntimeContext.Builder createRuntimeContext() {
+
+    Container container = mock(Container.class);
+    ContainerLaunchContext ctx = mock(ContainerLaunchContext.class);
+
+    when(container.getLaunchContext()).thenReturn(ctx);
+    when(ctx.getEnvironment()).thenReturn(env);
+
+    ContainerRuntimeContext.Builder builder =
+        new ContainerRuntimeContext.Builder(container);
+
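+    // A single empty list stands in for all of the directory attributes; only
+    // the attributes that individual tests override (USER, CONTAINER_RUN_CMDS)
+    // matter here.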
+    List<String> localDirs = new ArrayList<>();
+
+    builder.setExecutionAttribute(LOCALIZED_RESOURCES, resources)
+        .setExecutionAttribute(RUN_AS_USER, NORMAL_USER)
+        .setExecutionAttribute(CONTAINER_ID_STR, CONTAINER_ID)
+        .setExecutionAttribute(APPID, APPLICATION_ID)
+        .setExecutionAttribute(CONTAINER_WORK_DIR,
+            new Path(containerDir.toString()))
+        .setExecutionAttribute(LOCAL_DIRS, localDirs)
+        .setExecutionAttribute(LOG_DIRS, localDirs)
+        .setExecutionAttribute(FILECACHE_DIRS, localDirs)
+        .setExecutionAttribute(USER_LOCAL_DIRS, localDirs)
+        .setExecutionAttribute(CONTAINER_LOCAL_DIRS, localDirs)
+        .setExecutionAttribute(CONTAINER_RUN_CMDS, localDirs);
+
+    return builder;
+  }
+
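+  // The policy generated in setup() should permit reads on the localized
+  // grantFile but reject reads on the unlisted denyFile.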
+  @Test
+  public void testGrant() throws Exception {
+    FilePermission grantPermission =
+        new FilePermission(grantFile.getAbsolutePath(), "read");
+    securityManager.checkPermission(grantPermission);
+  }
+
+  @Test
+  public void testDeny() throws Exception {
+    FilePermission denyPermission =
+        new FilePermission(denyFile.getAbsolutePath(), "read");
+    exception.expect(java.security.AccessControlException.class);
+    securityManager.checkPermission(denyPermission);
+  }
+
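+  // In enforcing mode, non-Java commands are rejected with a
+  // ContainerExecutionException.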
+  @Test
+  public void testEnforcingMode() throws ContainerExecutionException {
+    String[] nonJavaCommands = {
+        "bash malicious_script.sh",
+        "python malicious_script.py"
+    };
+
+    List<String> commands = Arrays.asList(nonJavaCommands);
+    exception.expect(ContainerExecutionException.class);
+    JavaSandboxLinuxContainerRuntime.NMContainerPolicyUtils
+        .appendSecurityFlags(commands, env, policyFilePath,
+            JavaSandboxLinuxContainerRuntime.SandboxMode.enforcing);
+
+  }
+
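+  // In permissive mode the same kind of non-Java commands are tolerated;
+  // no exception is expected.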
+  @Test
+  public void testPermissiveMode() throws ContainerExecutionException {
+    String[] nonJavaCommands = {
+        "bash non-java-script.sh",
+        "python non-java-script.py"
+    };
+
+    List<String> commands = Arrays.asList(nonJavaCommands);
+    JavaSandboxLinuxContainerRuntime.NMContainerPolicyUtils
+        .appendSecurityFlags(commands, env, policyFilePath,
+              JavaSandboxLinuxContainerRuntime.SandboxMode.permissive);
+
+  }
+
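+  // With the sandbox disabled (the default), commands from a member of the
+  // whitelist group are passed through unmodified.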
+  @Test
+  public void testDisabledSandboxWithWhitelist()
+      throws ContainerExecutionException {
+
+    String[] inputCommand = {
+        "java jar MyJob.jar"
+    };
+    List<String> commands = Arrays.asList(inputCommand);
+
+    runtimeContextBuilder.setExecutionAttribute(USER, WHITELIST_USER);
+    runtimeContextBuilder.setExecutionAttribute(CONTAINER_RUN_CMDS, commands);
+    runtime.prepareContainer(runtimeContextBuilder.build());
+
+    Assert.assertTrue("Command should not be modified when user is " +
+            "member of whitelisted group",
+        inputCommand[0].equals(commands.get(0)));
+
+  }
+
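+  // A whitelisted user who explicitly requests -Djava.security.manager still
+  // has the generated policy file appended to the command.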
+  @Test
+  public void testEnabledSandboxWithWhitelist()
+      throws ContainerExecutionException{
+    String[] inputCommand = {
+        "$JAVA_HOME/bin/java jar -Djava.security.manager MyJob.jar"
+    };
+    List<String> commands = Arrays.asList(inputCommand);
+
+    runtimeContextBuilder.setExecutionAttribute(USER, WHITELIST_USER);
+    runtimeContextBuilder.setExecutionAttribute(CONTAINER_RUN_CMDS, commands);
+    runtime.prepareContainer(runtimeContextBuilder.build());
+
+    Assert.assertTrue("Command should be modified to include " +
+            "policy file in whitelisted Sandbox mode",
+        commands.get(0).contains(SECURITY_FLAG)
+        && commands.get(0).contains(POLICY_FLAG));
+  }
+
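+  // A user who is not in the whitelist group has the Java security manager
+  // enabled for their command.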
+  @Test
+  public void testDeniedWhitelistGroup() throws ContainerExecutionException {
+
+    String[] inputCommand = {
+        "$JAVA_HOME/bin/java jar MyJob.jar"
+    };
+    List<String> commands = Arrays.asList(inputCommand);
+
+    runtimeContextBuilder.setExecutionAttribute(USER, NORMAL_USER);
+    runtimeContextBuilder.setExecutionAttribute(CONTAINER_RUN_CMDS, commands);
+    runtime.prepareContainer(runtimeContextBuilder.build());
+
+    Assert.assertTrue("Java security manager must be enabled for "
+            + "unauthorized users",
+        commands.get(0).contains(SECURITY_FLAG));
+  }
+
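+  // CHAINED_COMMAND_REGEX should flag shell chaining (&&, ||) but not simple
+  // output redirection (&>).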
+  @Test
+  public void testChainedCmdRegex(){
+    Assert.assertTrue("cmd1 && cmd2 || cmd3".matches(CHAINED_COMMAND_REGEX));
+    Assert.assertFalse("cmd1 &> logfile".matches(CHAINED_COMMAND_REGEX));
+  }
+
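+  // CONTAINS_JAVA_CMD should match genuine $JAVA_HOME/bin/java invocations,
+  // not commands that merely contain "java" in a path or binary name.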
+  @Test
+  public void testContainsJavaRegex(){
+    String[] javaCmds = {
+        "$JAVA_HOME/bin/java -cp App.jar AppClass",
+        "$JAVA_HOME/bin/java -jar App.jar AppClass &> logfile"
+    };
+    String[] nonJavaCmds = {
+        "$JAVA_HOME/bin/jajavava -cp App.jar AppClass",
+        "/nm/app/container/usercache/badjava -cp Bad.jar ChaosClass"
+    };
+    for(String javaCmd : javaCmds) {
+      Assert.assertTrue(javaCmd.matches(CONTAINS_JAVA_CMD));
+    }
+    for(String nonJavaCmd : nonJavaCmds) {
+      Assert.assertFalse(nonJavaCmd.matches(CONTAINS_JAVA_CMD));
+    }
+  }
+
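+  // CLEAN_CMD_REGEX strips user-supplied security manager and policy flags
+  // while leaving the rest of the command intact.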
+  @Test
+  public void testCleanCmdRegex(){
+    String[] securityManagerCmds = {
+        "/usr/bin/java -Djava.security.manager -cp $CLASSPATH $MainClass",
+        "-Djava.security.manager -Djava.security.policy==testpolicy keepThis"
+    };
+    String[] cleanedCmdsResult = {
+        "/usr/bin/java  -cp $CLASSPATH $MainClass",
+        "keepThis"
+    };
+    for(int i = 0; i < securityManagerCmds.length; i++){
+      Assert.assertEquals(
+          securityManagerCmds[i].replaceAll(CLEAN_CMD_REGEX, "").trim(),
+          cleanedCmdsResult[i]);
+    }
+  }
+
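+  // appendSecurityFlags should rewrite each command to reference the
+  // generated policy file, replacing any user-supplied security settings.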
+  @Test
+  public void testAppendSecurityFlags() throws ContainerExecutionException {
+    String securityString = "-Djava.security.manager -Djava.security.policy=="
+        + policyFile.getAbsolutePath();
+    String[] badCommands = {
+        "$JAVA_HOME/bin/java -Djava.security.manager "
+            + "-Djava.security.policy=/home/user/java.policy",
+        "$JAVA_HOME/bin/java -cp MyApp.jar MrAppMaster"
+    };
+    String[] cleanCommands = {
+        "$JAVA_HOME/bin/java " + securityString,
+        "$JAVA_HOME/bin/java " + securityString
+            + " -cp MyApp.jar MrAppMaster"
+    };
+
+    List<String> commands = Arrays.asList(badCommands);
+    JavaSandboxLinuxContainerRuntime.NMContainerPolicyUtils
+        .appendSecurityFlags(commands, env, policyFilePath,
+            JavaSandboxLinuxContainerRuntime.SandboxMode.enforcing);
+
+    for(int i = 0; i < commands.size(); i++) {
+      Assert.assertTrue(commands.get(i).trim().equals(cleanCommands[i].trim()));
+    }
+  }
+
+  @After
+  public void cleanup(){
+    System.setSecurityManager(null);
+  }
+}
\ No newline at end of file

