AMBARI-20400 Yarn should not copy Tez and Slider tar ball if Tez and Slider 
are not installed on the cluster (dili)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/3ab4c8d2
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/3ab4c8d2
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/3ab4c8d2

Branch: refs/heads/branch-dev-logsearch
Commit: 3ab4c8d2ae0a7f28bc73d8438f79d2f18f96cc01
Parents: c5ccb1a
Author: Di Li <d...@apache.org>
Authored: Mon Mar 27 15:39:33 2017 -0400
Committer: Di Li <d...@apache.org>
Committed: Mon Mar 27 15:39:33 2017 -0400

----------------------------------------------------------------------
 .../libraries/functions/copy_tarball.py         | 24 ++++++++++++++++++-
 .../python/stacks/2.0.6/configs/default.json    | 25 ++++++++++++++++++++
 2 files changed, 48 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/3ab4c8d2/ambari-common/src/main/python/resource_management/libraries/functions/copy_tarball.py
----------------------------------------------------------------------
diff --git 
a/ambari-common/src/main/python/resource_management/libraries/functions/copy_tarball.py
 
b/ambari-common/src/main/python/resource_management/libraries/functions/copy_tarball.py
index 31a9be4..63b6926 100644
--- 
a/ambari-common/src/main/python/resource_management/libraries/functions/copy_tarball.py
+++ 
b/ambari-common/src/main/python/resource_management/libraries/functions/copy_tarball.py
@@ -64,6 +64,18 @@ TARBALL_MAP = {
              
"/{0}/apps/{1}/spark2/spark2-{0}-yarn-archive.tar.gz".format(STACK_NAME_PATTERN,
 STACK_VERSION_PATTERN))
 }
 
+SERVICE_MAP = {
+  "slider": "SLIDER",
+  "tez": "TEZ_CLIENT",
+  "pig": "PIG",
+  "sqoop": "SQOOP_CLIENT",
+  "hive": "HIVE_CLIENT",
+  "mapreduce": "HADOOP_CLIENT",
+  "hadoop_streaming": "MAPREDUCE2_CLIENT",
+  "tez_hive2": "HIVE_CLIENT",
+  "spark": "SPARK_CLIENT",
+  "spark2": "SPARK2_CLIENT"
+}
 
 def get_sysprep_skip_copy_tarballs_hdfs():
   import params
@@ -199,7 +211,7 @@ def _get_single_version_from_stack_select():
 
 
 def copy_to_hdfs(name, user_group, owner, file_mode=0444, 
custom_source_file=None, custom_dest_file=None, force_execute=False,
-                 use_upgrading_version_during_upgrade=True, 
replace_existing_files=False, skip=False):
+                 use_upgrading_version_during_upgrade=True, 
replace_existing_files=False, skip=False, skip_component_check=False):
   """
   :param name: Tarball name, e.g., tez, hive, pig, sqoop.
   :param user_group: Group to own the directory.
@@ -210,6 +222,8 @@ def copy_to_hdfs(name, user_group, owner, file_mode=0444, 
custom_source_file=Non
   :param force_execute: If true, will execute the HDFS commands immediately, 
otherwise, will defer to the calling function.
   :param use_upgrading_version_during_upgrade: If true, will use the version 
going to during upgrade. Otherwise, use the CURRENT (source) version.
   :param skip: If true, tarballs will not be copied as the cluster deployment 
uses prepped VMs.
+  :param skip_component_check: If true, will skip checking if a given 
component is installed on the node for a file under its dir to be copied.
+                               This is in case the file is not mapped to a 
component but rather to a specific location (JDK jar, Ambari jar, etc).
   :return: Will return True if successful, otherwise, False.
   """
   import params
@@ -226,6 +240,14 @@ def copy_to_hdfs(name, user_group, owner, file_mode=0444, 
custom_source_file=Non
     Logger.warning("Skipping copying {0} to {1} for {2} as it is a sys prepped 
host.".format(str(source_file), str(dest_file), str(name)))
     return True
 
+  if not skip_component_check:
+    #Use components installed on the node to check if a file can be copied 
into HDFS
+    local_components = default("/localComponents", [])
+    component = SERVICE_MAP.get(name)
+    if component not in local_components:
+      Logger.info("{0} is not installed on the host. Skip copying 
{1}".format(component, source_file))
+      return False
+
   Logger.info("Source file: {0} , Dest file in HDFS: {1}".format(source_file, 
dest_file))
 
   if not os.path.exists(source_file):

http://git-wip-us.apache.org/repos/asf/ambari/blob/3ab4c8d2/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default.json 
b/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
index fa7419f..ceb0ca0 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
@@ -1,4 +1,26 @@
 {
+    "localComponents": [
+        "NAMENODE",
+        "SECONDARY_NAMENODE",
+        "ZOOKEEPER_SERVER",
+        "DATANODE",
+        "HDFS_CLIENT",
+        "ZOOKEEPER_CLIENT",
+        "RESOURCEMANAGER",
+        "HISTORYSERVER",
+        "NODEMANAGER",
+        "YARN_CLIENT",
+        "MAPREDUCE2_CLIENT",
+        "SLIDER",
+        "PIG",
+        "SQOOP_CLIENT",
+        "HIVE_CLIENT",
+        "TEZ_CLIENT",
+        "HADOOP_CLIENT",
+        "HIVE_CLIENT",
+        "SPARK_CLIENT",
+        "SPARK2_CLIENT"
+    ],
     "roleCommand": "SERVICE_CHECK",
     "clusterName": "c1",
     "hostname": "c6401.ambari.apache.org",
@@ -544,6 +566,9 @@
             "tez.staging-dir": "/tmp/${user.name}/staging",
             "tez.am.am-rm.heartbeat.interval-ms.max": "250"
         },
+        "slider-env": {
+            "content": "export JAVA_HOME={{java64_home}}\nexport 
HADOOP_CONF_DIR={{hadoop_conf_dir}}"
+        },
         "yarn-env": {
             "yarn_pid_dir_prefix": "/var/run/hadoop-yarn",
             "apptimelineserver_heapsize": "1024",

Reply via email to