YARN-4017. container-executor overuses PATH_MAX. Contributed by Sidharta Seethana

(cherry picked from commit 5b6bae00942c495e4be6ea2b8eb0676a48468dc6)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/34b81428
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/34b81428
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/34b81428

Branch: refs/heads/branch-2
Commit: 34b8142802eb4af99b3bb7586bc2f8a47f6d73c7
Parents: 50253c4
Author: Varun Vasudev <vvasu...@apache.org>
Authored: Tue Oct 13 12:48:30 2015 +0530
Committer: Varun Vasudev <vvasu...@apache.org>
Committed: Tue Oct 13 12:50:28 2015 +0530

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |  2 +
 .../container-executor/impl/configuration.c     |  4 +-
 .../container-executor/impl/configuration.h     |  4 ++
 .../impl/container-executor.c                   | 40 ++++++++++----------
 4 files changed, 28 insertions(+), 22 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/34b81428/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index d2d00e5..e070d76 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -877,6 +877,8 @@ Release 2.8.0 - UNRELEASED
     YARN-4230. RM crashes with NPE when increasing container resource if there is no headroom left.
     (Meng Ding via jianhe)
 
+    YARN-4017. container-executor overuses PATH_MAX. (Sidharta Seethana via vvasudev)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/34b81428/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.c
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.c
index 373dbfd..94d81f4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.c
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.c
@@ -95,12 +95,12 @@ static int is_only_root_writable(const char *file) {
  */
 char *resolve_config_path(const char* file_name, const char *root) {
   const char *real_fname = NULL;
-  char buffer[PATH_MAX*2 + 1];
+  char buffer[EXECUTOR_PATH_MAX*2 + 1];
 
   if (file_name[0] == '/') {
     real_fname = file_name;
   } else if (realpath(root, buffer) != NULL) {
-    strncpy(strrchr(buffer, '/') + 1, file_name, PATH_MAX);
+    strncpy(strrchr(buffer, '/') + 1, file_name, EXECUTOR_PATH_MAX);
     real_fname = buffer;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/34b81428/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.h
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.h b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.h
index 390a5b5..de5cc1d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.h
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.h
@@ -18,6 +18,10 @@
 
 #include <stddef.h>
 
+/** Define a platform-independent constant instead of using PATH_MAX */
+
+#define EXECUTOR_PATH_MAX 4096
+
 /**
  * Ensure that the configuration file and all of the containing directories
  * are only writable by root. Otherwise, an attacker can change the 
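
A note on the new constant above: <limits.h> is not required to define
PATH_MAX on every platform (POSIX allows the limit to be indeterminate), so
buffers declared with it may not even compile everywhere. A minimal sketch of
the two usual options follows; the guarded fallback is shown only for
contrast and is not what this patch does.

    #include <limits.h>
    #include <stdio.h>

    /* Option 1 (for contrast only): fall back when <limits.h> omits PATH_MAX. */
    #ifndef PATH_MAX
    #define PATH_MAX 4096
    #endif

    /* Option 2 (what the patch adopts): one fixed, platform-independent
     * constant, defined in configuration.h and used in place of PATH_MAX. */
    #define EXECUTOR_PATH_MAX 4096

    int main(void) {
      char path[EXECUTOR_PATH_MAX];  /* same size on every platform */
      /* "/some/conf/dir" is a placeholder path for illustration only. */
      snprintf(path, EXECUTOR_PATH_MAX, "%s/%s", "/some/conf/dir", "container-executor.cfg");
      printf("%s\n", path);
      return 0;
    }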

http://git-wip-us.apache.org/repos/asf/hadoop/blob/34b81428/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
index 2ffac43..9feba1f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
@@ -68,17 +68,17 @@ void set_nm_uid(uid_t user, gid_t group) {
  * get the executable filename.
  */
 char* get_executable() {
-  char buffer[PATH_MAX];
-  snprintf(buffer, PATH_MAX, "/proc/%" PRId64 "/exe", (int64_t)getpid());
-  char *filename = malloc(PATH_MAX);
-  ssize_t len = readlink(buffer, filename, PATH_MAX);
+  char buffer[EXECUTOR_PATH_MAX];
+  snprintf(buffer, EXECUTOR_PATH_MAX, "/proc/%" PRId64 "/exe", (int64_t)getpid());
+  char *filename = malloc(EXECUTOR_PATH_MAX);
+  ssize_t len = readlink(buffer, filename, EXECUTOR_PATH_MAX);
   if (len == -1) {
     fprintf(ERRORFILE, "Can't get executable name from %s - %s\n", buffer,
             strerror(errno));
     exit(-1);
-  } else if (len >= PATH_MAX) {
+  } else if (len >= EXECUTOR_PATH_MAX) {
     fprintf(ERRORFILE, "Executable name %.*s is longer than %d characters.\n",
-            PATH_MAX, filename, PATH_MAX);
+            EXECUTOR_PATH_MAX, filename, EXECUTOR_PATH_MAX);
     exit(-1);
   }
   filename[len] = '\0';
@@ -1060,8 +1060,8 @@ char* parse_docker_command_file(const char* command_file) {
 int run_docker(const char *command_file) {
   char* docker_command = parse_docker_command_file(command_file);
   char* docker_binary = get_value(DOCKER_BINARY_KEY);
-  char* docker_command_with_binary = calloc(sizeof(char), PATH_MAX);
-  snprintf(docker_command_with_binary, PATH_MAX, "%s %s", docker_binary, docker_command);
+  char* docker_command_with_binary = calloc(sizeof(char), EXECUTOR_PATH_MAX);
+  snprintf(docker_command_with_binary, EXECUTOR_PATH_MAX, "%s %s", docker_binary, docker_command);
   char **args = extract_values_delim(docker_command_with_binary, " ");
 
   int exit_code = -1;
@@ -1207,11 +1207,11 @@ int launch_docker_container_as_user(const char * user, const char *app_id,
   char *script_file_dest = NULL;
   char *cred_file_dest = NULL;
   char *exit_code_file = NULL;
-  char docker_command_with_binary[PATH_MAX];
-  char docker_wait_command[PATH_MAX];
-  char docker_logs_command[PATH_MAX];
-  char docker_inspect_command[PATH_MAX];
-  char docker_rm_command[PATH_MAX];
+  char docker_command_with_binary[EXECUTOR_PATH_MAX];
+  char docker_wait_command[EXECUTOR_PATH_MAX];
+  char docker_logs_command[EXECUTOR_PATH_MAX];
+  char docker_inspect_command[EXECUTOR_PATH_MAX];
+  char docker_rm_command[EXECUTOR_PATH_MAX];
   int container_file_source =-1;
   int cred_file_source = -1;
   int BUFFER_SIZE = 4096;
@@ -1256,7 +1256,7 @@ int launch_docker_container_as_user(const char * user, const char *app_id,
     goto cleanup;
   }
 
-  snprintf(docker_command_with_binary, PATH_MAX, "%s %s", docker_binary, docker_command);
+  snprintf(docker_command_with_binary, EXECUTOR_PATH_MAX, "%s %s", docker_binary, docker_command);
 
   FILE* start_docker = popen(docker_command_with_binary, "r");
   if (pclose (start_docker) != 0)
@@ -1268,7 +1268,7 @@ int launch_docker_container_as_user(const char * user, const char *app_id,
     goto cleanup;
   }
 
-  snprintf(docker_inspect_command, PATH_MAX,
+  snprintf(docker_inspect_command, EXECUTOR_PATH_MAX,
     "%s inspect --format {{.State.Pid}} %s",
     docker_binary, container_id);
 
@@ -1307,7 +1307,7 @@ int launch_docker_container_as_user(const char * user, const char *app_id,
       goto cleanup;
     }
 
-    snprintf(docker_wait_command, PATH_MAX,
+    snprintf(docker_wait_command, EXECUTOR_PATH_MAX,
       "%s wait %s", docker_binary, container_id);
 
     FILE* wait_docker = popen(docker_wait_command, "r");
@@ -1318,7 +1318,7 @@ int launch_docker_container_as_user(const char * user, const char *app_id,
       fflush(ERRORFILE);
     }
     if(exit_code != 0) {
-      snprintf(docker_logs_command, PATH_MAX, "%s logs --tail=250 %s",
+      snprintf(docker_logs_command, EXECUTOR_PATH_MAX, "%s logs --tail=250 %s",
         docker_binary, container_id);
       FILE* logs = popen(docker_logs_command, "r");
       if(logs != NULL) {
@@ -1347,7 +1347,7 @@ int launch_docker_container_as_user(const char * user, const char *app_id,
     }
   }
 
-  snprintf(docker_rm_command, PATH_MAX,
+  snprintf(docker_rm_command, EXECUTOR_PATH_MAX,
     "%s rm %s", docker_binary, container_id);
   FILE* rm_docker = popen(docker_rm_command, "w");
   if (pclose (rm_docker) != 0)
@@ -1766,7 +1766,7 @@ int mount_cgroup(const char *pair, const char *hierarchy) {
 #else
   char *controller = malloc(strlen(pair));
   char *mount_path = malloc(strlen(pair));
-  char hier_path[PATH_MAX];
+  char hier_path[EXECUTOR_PATH_MAX];
   int result = 0;
 
   if (get_kv_key(pair, controller, strlen(pair)) < 0 ||
@@ -1778,7 +1778,7 @@ int mount_cgroup(const char *pair, const char *hierarchy) {
     if (mount("none", mount_path, "cgroup", 0, controller) == 0) {
       char *buf = stpncpy(hier_path, mount_path, strlen(mount_path));
       *buf++ = '/';
-      snprintf(buf, PATH_MAX - (buf - hier_path), "%s", hierarchy);
+      snprintf(buf, EXECUTOR_PATH_MAX - (buf - hier_path), "%s", hierarchy);
 
       // create hierarchy as 0750 and chown to Hadoop NM user
       const mode_t perms = S_IRWXU | S_IRGRP | S_IXGRP;
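
Taken together, the hunks above apply one pattern: size path buffers with the
fixed EXECUTOR_PATH_MAX constant and bound every snprintf()/readlink() call
by it, instead of relying on PATH_MAX. Below is a small standalone sketch of
that pattern, loosely mirroring get_executable(); it assumes a Linux /proc
filesystem, and the helper name read_self_exe() is illustrative only and does
not appear in the patch.

    #include <errno.h>
    #include <inttypes.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <unistd.h>

    #define EXECUTOR_PATH_MAX 4096  /* same value the patch introduces */

    /* Resolve the running executable's path with bounded buffers. */
    static char *read_self_exe(void) {
      char link[EXECUTOR_PATH_MAX];
      snprintf(link, EXECUTOR_PATH_MAX, "/proc/%" PRId64 "/exe",
               (int64_t) getpid());

      char *target = malloc(EXECUTOR_PATH_MAX);
      if (target == NULL) {
        return NULL;
      }
      /* readlink() does not NUL-terminate, so leave room for the '\0'. */
      ssize_t len = readlink(link, target, EXECUTOR_PATH_MAX - 1);
      if (len == -1) {
        fprintf(stderr, "Can't resolve %s - %s\n", link, strerror(errno));
        free(target);
        return NULL;
      }
      target[len] = '\0';
      return target;
    }

    int main(void) {
      char *exe = read_self_exe();
      if (exe != NULL) {
        printf("%s\n", exe);
        free(exe);
      }
      return 0;
    }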
