This is an automated email from the ASF dual-hosted git repository.

jluniya pushed a commit to branch branch-feature-AMBARI-14714
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/branch-feature-AMBARI-14714 by this push:
     new 0342af0  AMBARI-23408: Update all references to command.json in stack-hooks to… (#855)
0342af0 is described below

commit 0342af06add05a7a0868f9ab4baaa7b27c29954c
Author: sduan <sd...@hortonworks.com>
AuthorDate: Tue Apr 3 09:28:49 2018 -0700

    AMBARI-23408: Update all references to command.json in stack-hooks to… (#855)
    
    * AMBARI-23408: Update all references to command.json in stack-hooks to use new execution_command library
    
    * AMBARI-23408: Update all references to command.json in stack-hooks to use new execution_command library
---
 .../execution_command/execution_command.py         |   2 +-
 .../libraries/functions/config_helper.py           |  45 -----
 .../resource_management/libraries/script/script.py |  15 +-
 .../stack-hooks/after-INSTALL/scripts/params.py    |  55 +++---
 .../after-INSTALL/scripts/shared_initialization.py |   6 +-
 .../stack-hooks/before-ANY/scripts/params.py       | 153 ++++++++-------
 .../before-ANY/scripts/shared_initialization.py    |   8 +-
 .../stack-hooks/before-INSTALL/scripts/params.py   |  81 ++++----
 .../before-START/scripts/custom_extensions.py      |  27 +--
 .../stack-hooks/before-START/scripts/params.py     | 216 ++++++++++-----------
 10 files changed, 284 insertions(+), 324 deletions(-)

diff --git a/ambari-common/src/main/python/resource_management/libraries/execution_command/execution_command.py b/ambari-common/src/main/python/resource_management/libraries/execution_command/execution_command.py
index 55c65dc..275574f 100644
--- a/ambari-common/src/main/python/resource_management/libraries/execution_command/execution_command.py
+++ b/ambari-common/src/main/python/resource_management/libraries/execution_command/execution_command.py
@@ -72,7 +72,7 @@ class ExecutionCommand(object):
     At this time it returns hardcoded 'default' name
     :return:
     """
-    return self.__get_value("default")
+    return "default"
 
   def get_servicegroup_name(self):
     return self.__get_value("serviceGroupName")
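
[Note] The hunks that follow migrate the stack-hooks from raw command.json dictionary lookups to the new execution_command library. As context, a minimal sketch of the wrapper shape these call sites assume — an illustration, not the actual Ambari class; the "/"-separated path walking is inferred from keys such as "clusterHostInfo/namenode_hosts":

    class ExecutionCommandSketch(object):
      def __init__(self, command_json):
        # command_json: the parsed command.json dict delivered to the agent
        self._execution_command = command_json

      def __get_value(self, key, default_value=None):
        # walk a "/"-separated path through the nested command dict
        sub_dict = self._execution_command
        for part in key.split("/"):
          if not isinstance(sub_dict, dict) or part not in sub_dict:
            return default_value
          sub_dict = sub_dict[part]
        return sub_dict

      def get_servicegroup_name(self):
        return self.__get_value("serviceGroupName")

      def get_component_instance_name(self):
        # hardcoded until multiple component instances are supported
        return "default"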
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/config_helper.py b/ambari-common/src/main/python/resource_management/libraries/functions/config_helper.py
deleted file mode 100644
index 2d85d29..0000000
--- a/ambari-common/src/main/python/resource_management/libraries/functions/config_helper.py
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-
-def get_mpack_name(config):
-  return config['hostLevelParams']['stack_name']
-
-
-def get_mpack_version(config):
-  return config['hostLevelParams']['stack_version']
-
-
-def get_mpack_instance_name(config):
-  return config['serviceGroupName']
-
-
-def get_module_name(config):
-  return config['serviceName']
-
-
-def get_component_type(config):
-  return config['role']
-
-
-def get_component_instance_name(config):
-  return "default"
diff --git a/ambari-common/src/main/python/resource_management/libraries/script/script.py b/ambari-common/src/main/python/resource_management/libraries/script/script.py
index 929d65f..d9a956d 100644
--- a/ambari-common/src/main/python/resource_management/libraries/script/script.py
+++ b/ambari-common/src/main/python/resource_management/libraries/script/script.py
@@ -61,8 +61,6 @@ from resource_management.libraries.functions.stack_features import check_stack_f
 from resource_management.libraries.functions.constants import StackFeature
 from resource_management.libraries.functions.show_logs import show_logs
 from resource_management.libraries.functions.fcntl_based_process_lock import FcntlBasedProcessLock
-from resource_management.libraries.functions.config_helper import get_mpack_name, get_mpack_version, \
-  get_mpack_instance_name, get_module_name, get_component_type, get_component_instance_name
 from resource_management.libraries.execution_command.execution_command import ExecutionCommand
 from resource_management.libraries.execution_command.module_configs import ModuleConfigs
 
@@ -790,12 +788,13 @@ class Script(object):
     # should be used only when mpack-instance-manager is available
     from resource_management.libraries.functions.mpack_manager_helper import create_component_instance
     config = self.get_config()
-    mpack_name = get_mpack_name(config)
-    mpack_version = get_mpack_version(config)
-    mpack_instance_name = get_mpack_instance_name(config)
-    module_name = get_module_name(config)
-    component_type = get_component_type(config)
-    component_instance_name = get_component_instance_name(config)
+    execution_command = self.get_execution_command()
+    mpack_name = execution_command.get_mpack_name()
+    mpack_version = execution_command.get_mpack_version()
+    mpack_instance_name = execution_command.get_servicegroup_name()
+    module_name = execution_command.get_module_name()
+    component_type = execution_command.get_component_type()
+    component_instance_name = execution_command.get_component_instance_name()
 
     create_component_instance(mpack_name=mpack_name, mpack_version=mpack_version, instance_name=mpack_instance_name,
                               module_name=module_name, components_instance_type=component_type,
diff --git a/ambari-server/src/main/resources/stack-hooks/after-INSTALL/scripts/params.py b/ambari-server/src/main/resources/stack-hooks/after-INSTALL/scripts/params.py
index ff4d723..bc81dd6 100644
--- a/ambari-server/src/main/resources/stack-hooks/after-INSTALL/scripts/params.py
+++ b/ambari-server/src/main/resources/stack-hooks/after-INSTALL/scripts/params.py
@@ -25,33 +25,36 @@ from resource_management.libraries.script.script import get_config_lock_file
 from resource_management.libraries.functions import default
 from resource_management.libraries.functions import conf_select
 from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions import format_jvm_option
+from resource_management.libraries.functions.format_jvm_option import format_jvm_option_value
 from resource_management.libraries.functions.version import format_stack_version, get_major_version
 from resource_management.libraries.functions.cluster_settings import get_cluster_setting_value
+from resource_management.libraries.execution_command import execution_command
+from resource_management.libraries.execution_command import module_configs
 from string import lower
 
-config = Script.get_config()
+execution_command = Script.get_execution_command()
+module_configs = Script.get_module_configs()
+module_name = execution_command.get_module_name()
 tmp_dir = Script.get_tmp_dir()
 
-dfs_type = default("/commandParams/dfs_type", "")
+dfs_type = execution_command.get_dfs_type()
 
-is_parallel_execution_enabled = int(default("/agentConfigParams/agent/parallel_execution", 0)) == 1
-host_sys_prepped = default("/ambariLevelParams/host_sys_prepped", False)
+is_parallel_execution_enabled = execution_command.check_agent_config_execute_in_parallel() == 1
+host_sys_prepped = execution_command.is_host_system_prepared()
 
 sudo = AMBARI_SUDO_BINARY
 
-stack_version_unformatted = config['clusterLevelParams']['stack_version']
-stack_version_formatted = format_stack_version(stack_version_unformatted)
+stack_version_formatted = execution_command.get_mpack_version() 
 major_stack_version = get_major_version(stack_version_formatted)
 
 # service name
-service_name = config['serviceName']
+service_name = execution_command.get_module_name()
 
 # logsearch configuration
 logsearch_logfeeder_conf = "/usr/lib/ambari-logsearch-logfeeder/conf"
 
-agent_cache_dir = config['agentLevelParams']['agentCacheDir']
-service_package_folder = config['commandParams']['service_package_folder']
+agent_cache_dir = execution_command.get_agent_cache_dir()
+service_package_folder = execution_command.get_module_package_folder()
 logsearch_service_name = service_name.lower().replace("_", "-")
 logsearch_config_file_name = 'input.config-' + logsearch_service_name + ".json"
 logsearch_config_file_path = agent_cache_dir + "/" + service_package_folder + "/templates/" + logsearch_config_file_name + ".j2"
@@ -68,36 +71,36 @@ versioned_stack_root = '/usr/hdp/current'
 security_enabled = get_cluster_setting_value('security_enabled')
 
 #java params
-java_home = config['ambariLevelParams']['java_home']
+java_home = execution_command.get_java_home()
 
 #hadoop params
-hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
-hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
-hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger']
+hdfs_log_dir_prefix = module_configs.get_property_value(module_name, 'hadoop-env', 'hdfs_log_dir_prefix')
+hadoop_pid_dir_prefix = module_configs.get_property_value(module_name, 'hadoop-env', 'hadoop_pid_dir_prefix')
+hadoop_root_logger = module_configs.get_property_value(module_name, 'hadoop-env', 'hadoop_root_logger')
 
 jsvc_path = "/usr/lib/bigtop-utils"
 
-hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
-namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
-namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
-namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
-namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
-namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")
+hadoop_heapsize = module_configs.get_property_value(module_name, 'hadoop-env', 'hadoop_heapsize')
+namenode_heapsize = module_configs.get_property_value(module_name, 'hadoop-env', 'namenode_heapsize')
+namenode_opt_newsize = module_configs.get_property_value(module_name, 'hadoop-env', 'namenode_opt_newsize')
+namenode_opt_maxnewsize = module_configs.get_property_value(module_name, 'hadoop-env', 'namenode_opt_maxnewsize')
+namenode_opt_permsize = format_jvm_option_value(module_configs.get_property_value(module_name, 'hadoop-env', 'namenode_opt_permsize', '128m'))
+namenode_opt_maxpermsize = format_jvm_option_value(module_configs.get_property_value(module_name, 'hadoop-env', 'namenode_opt_maxpermsize', '256m'))
 
 jtnode_opt_newsize = "200m"
 jtnode_opt_maxnewsize = "200m"
 jtnode_heapsize =  "1024m"
 ttnode_heapsize = "1024m"
 
-dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
-mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
-mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
+dtnode_heapsize = module_configs.get_property_value(module_name, 'hadoop-env', 'dtnode_heapsize')
+mapred_pid_dir_prefix = module_configs.get_property_value(module_name, 'mapred-env', 'mapred_pid_dir_prefix', '/var/run/hadoop-mapreduce')
+mapred_log_dir_prefix = module_configs.get_property_value(module_name, 'mapred-env', 'mapred_log_dir_prefix', '/var/log/hadoop-mapreduce')
 
 #users and groups
-hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+hdfs_user = module_configs.get_property_value(module_name, 'hadoop-env', 'hdfs_user')
 user_group = get_cluster_setting_value('user_group')
 
-namenode_host = default("/clusterHostInfo/namenode_hosts", [])
+namenode_host = execution_command._execution_command.__get_value("clusterHostInfo/namenode_hosts", [])
 has_namenode = not len(namenode_host) == 0
 
 if has_namenode or dfs_type == 'HCFS':
@@ -106,4 +109,4 @@ if has_namenode or dfs_type == 'HCFS':
 link_configs_lock_file = get_config_lock_file()
 stack_select_lock_file = os.path.join(tmp_dir, "stack_select_lock_file")
 
-upgrade_suspended = default("/roleParams/upgrade_suspended", False)
+upgrade_suspended = execution_command.is_upgrade_suspended()
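
[Note] The params.py rewrites above all funnel through ModuleConfigs.get_property_value. A sketch of the lookup semantics these call sites rely on — the signature is inferred from usage in this diff, not taken from the library, and the storage layout is an assumption:

    def get_property_value(configs, module_name, config_type, property_name, default=None):
      """configs is assumed to be {module: {config_type: {property: value}}}."""
      properties = configs.get(module_name, {}).get(config_type, {})
      if property_name == '':
        # some call sites (e.g. XmlConfig below) pass '' to fetch a whole config type
        return properties or default
      return properties.get(property_name, default)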
diff --git a/ambari-server/src/main/resources/stack-hooks/after-INSTALL/scripts/shared_initialization.py b/ambari-server/src/main/resources/stack-hooks/after-INSTALL/scripts/shared_initialization.py
index 22543bc..3febc85 100644
--- a/ambari-server/src/main/resources/stack-hooks/after-INSTALL/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stack-hooks/after-INSTALL/scripts/shared_initialization.py
@@ -29,6 +29,8 @@ from resource_management.libraries.functions.default import default
 from resource_management.libraries.functions.format import format
 from resource_management.libraries.functions.fcntl_based_process_lock import FcntlBasedProcessLock
 from resource_management.libraries.resources.xml_config import XmlConfig
+from resource_management.libraries.execution_command import execution_command
+from resource_management.libraries.execution_command import module_configs
 from resource_management.libraries.script import Script
 
 
@@ -81,8 +83,8 @@ def setup_config():
     # create core-site only if the hadoop config diretory exists
     XmlConfig("core-site.xml",
               conf_dir=params.hadoop_conf_dir,
-              configurations=params.config['configurations']['core-site'],
-              configuration_attributes=params.config['configurationAttributes']['core-site'],
+              configurations=params.module_configs.get_property_value(params.module_name, 'core-site', ''),
+              configuration_attributes=params.execution_command._execution_command.__get_value("configurationAttributes/core-site"),
               owner=params.hdfs_user,
               group=params.user_group,
               only_if=format("ls {hadoop_conf_dir}"))
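
[Note] One caution on the configuration_attributes line above: __get_value is a double-underscore method defined inside ExecutionCommand, so Python name-mangles it to _ExecutionCommand__get_value at class-definition time. Calling it as ..._execution_command.__get_value(...) from module- or function-level code therefore looks up an attribute literally named __get_value and would raise AttributeError at runtime (unless it is injected dynamically). A tiny self-contained demonstration:

    class Wrapper(object):
      def __get_value(self, key):  # stored on the class as _Wrapper__get_value
        return key

    w = Wrapper()
    print(w._Wrapper__get_value("ok"))  # works via the mangled name
    try:
      w.__get_value("boom")             # no mangling outside the class body
    except AttributeError as e:
      print(e)  # 'Wrapper' object has no attribute '__get_value'

A public accessor on ExecutionCommand would avoid both the mangling issue and the reach into a private attribute.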
diff --git a/ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/params.py b/ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/params.py
index b083702..50168e1 100644
--- a/ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/params.py
+++ b/ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/params.py
@@ -27,7 +27,7 @@ import ambari_simplejson as json # simplejson is much faster comparing to Python
 from resource_management.libraries.script import Script
 from resource_management.libraries.functions import default
 from resource_management.libraries.functions import format
-from resource_management.libraries.functions import format_jvm_option
+from resource_management.libraries.functions.format_jvm_option import format_jvm_option_value
 from resource_management.libraries.functions.is_empty import is_empty
 from resource_management.libraries.functions.version import format_stack_version
 from resource_management.libraries.functions.expect import expect
@@ -37,51 +37,55 @@ from resource_management.libraries.functions.stack_features import get_stack_fea
 from resource_management.libraries.functions.get_architecture import get_architecture
 from resource_management.libraries.functions.cluster_settings import get_cluster_setting_value
 from ambari_commons.constants import AMBARI_SUDO_BINARY, HADOOP_CLIENTS_MODULE_NAME, HADOOP_CLIENT_COMPONENT_TYPE
-import resource_management.libraries.functions.config_helper as config_helper
 from resource_management.libraries.functions.mpack_manager_helper import get_component_conf_path, get_component_home_path
-
+from resource_management.libraries.execution_command import execution_command
+from resource_management.libraries.execution_command import module_configs
 
 config = Script.get_config()
+execution_command = Script.get_execution_command()
+module_configs = Script.get_module_configs()
+module_name = execution_command.get_module_name()
 tmp_dir = Script.get_tmp_dir()
 
 stack_root = Script.get_stack_root()
 
 architecture = get_architecture()
 
-dfs_type = default("/commandParams/dfs_type", "")
+dfs_type = execution_command.get_dfs_type()
 
 artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
-jdk_name = default("/ambariLevelParams/jdk_name", None)
-java_home = config['ambariLevelParams']['java_home']
-java_version = expect("/ambariLevelParams/java_version", int)
-jdk_location = config['ambariLevelParams']['jdk_location']
+jdk_name = execution_command.get_jdk_name()
+java_home = execution_command.get_jdk_home()
+java_version = execution_command.get_java_version()
+jdk_location = execution_command.get_jdk_location()
 
-hadoop_custom_extensions_enabled = default("/configurations/core-site/hadoop.custom-extensions.enabled", False)
+hadoop_custom_extensions_enabled = module_configs.get_property_value(module_name, 'core-site', 'hadoop.custom-extensions.enabled', False)
 
 sudo = AMBARI_SUDO_BINARY
 
-ambari_server_hostname = config['ambariLevelParams']['ambari_server_host']
+ambari_server_hostname = execution_command.get_ambari_server_host()
 
-stack_version_unformatted = config['clusterLevelParams']['stack_version']
-stack_version_formatted = config['clusterLevelParams']['stack_version']
-#stack_version_formatted = format_stack_version(stack_version_unformatted)
+stack_version_formatted = execution_command.get_mpack_version()
 
-upgrade_type = Script.get_upgrade_type(default("/commandParams/upgrade_type", ""))
-version = default("/commandParams/version", None)
+upgrade_type = Script.get_upgrade_type(execution_command.get_upgrade_type())
+version = execution_command.get_new_mpack_version_for_upgrade()
 # Handle upgrade and downgrade
 if (upgrade_type is not None) and version:
   stack_version_formatted = format_stack_version(version)
-ambari_java_home = default("/commandParams/ambari_java_home", None)
-ambari_jdk_name = default("/commandParams/ambari_jdk_name", None)
+"""
+??? is this the same as ambariLevelParams/java_home and ambariLevelParams/java_name ???
+"""
+ambari_java_home = execution_command.get_ambari_java_home()
+ambari_jdk_name = execution_command.get_ambari_jdk_name()
 
 security_enabled = get_cluster_setting_value('security_enabled')
-hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+hdfs_user = module_configs.get_property_value(module_name, 'hadoop-env', 'hdfs_user')
 
 # Some datanode settings
-dfs_dn_addr = default('/configurations/hdfs-site/dfs.datanode.address', None)
-dfs_dn_http_addr = default('/configurations/hdfs-site/dfs.datanode.http.address', None)
-dfs_dn_https_addr = default('/configurations/hdfs-site/dfs.datanode.https.address', None)
-dfs_http_policy = default('/configurations/hdfs-site/dfs.http.policy', None)
+dfs_dn_addr = module_configs.get_property_value(module_name, 'hdfs-site', 'dfs.datanode.address')
+dfs_dn_http_addr = module_configs.get_property_value(module_name, 'hdfs-site', 'dfs.datanode.http.address')
+dfs_dn_https_addr = module_configs.get_property_value(module_name, 'hdfs-site', 'dfs.datanode.https.address')
+dfs_http_policy = module_configs.get_property_value(module_name, 'hdfs-site', 'dfs.http.policy')
 secure_dn_ports_are_in_use = False
 
 def get_port(address):
@@ -110,20 +114,19 @@ def is_secure_port(port):
 # force the use of "current" in the hook
 hdfs_user_nofile_limit = default("/configurations/hadoop-env/hdfs_user_nofile_limit", "128000")
 
-mpack_name = config_helper.get_mpack_name(config)
-mpack_instance_name = config_helper.get_mpack_instance_name(config)
-module_name = config_helper.get_module_name(config)
-component_type = config_helper.get_component_type(config)
-component_instance_name = config_helper.get_component_instance_name(config)
+mpack_name = execution_command.get_mpack_name()
+mpack_instance_name = execution_command.get_servicegroup_name()
+module_name = execution_command.get_module_name()
+component_type = execution_command.get_component_type()
+component_instance_name = execution_command.get_component_instance_name()
 
-stack_name = default("/clusterLevelParams/stack_name", None)
-stack_name = stack_name.lower()
+stack_name = mpack_name.lower()
 component_directory = "namenode"
 
 hadoop_dir = "/etc/hadoop"
 hadoop_java_io_tmpdir = os.path.join(tmp_dir, "hadoop_java_io_tmpdir")
-datanode_max_locked_memory = config['configurations']['hdfs-site']['dfs.datanode.max.locked.memory']
-is_datanode_max_locked_memory_set = not is_empty(config['configurations']['hdfs-site']['dfs.datanode.max.locked.memory'])
+datanode_max_locked_memory = module_configs.get_property_value(module_name, 'hdfs-site', 'dfs.datanode.max.locked.memory')
+is_datanode_max_locked_memory_set = not is_empty(module_configs.get_property_value(module_name, 'hdfs-site', 'dfs.datanode.max.locked.memory'))
 
 mapreduce_libs_path = "/usr/hdp/current/hadoop-mapreduce-client/*"
 
@@ -146,51 +149,51 @@ else:
     hadoop_secure_dn_user = '""'
 
 #hadoop params
-hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
-hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
-hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger']
+hdfs_log_dir_prefix = module_configs.get_property_value(module_name, 'hadoop-env', 'hdfs_log_dir_prefix')
+hadoop_pid_dir_prefix = module_configs.get_property_value(module_name, 'hadoop-env', 'hadoop_pid_dir_prefix')
+hadoop_root_logger = module_configs.get_property_value(module_name, 'hadoop-env', 'hadoop_root_logger')
 
 jsvc_path = "/usr/lib/bigtop-utils"
 
-hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
-namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
-namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
-namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
-namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
-namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")
+hadoop_heapsize = module_configs.get_property_value(module_name, 'hadoop-env', 'hadoop_heapsize')
+namenode_heapsize = module_configs.get_property_value(module_name, 'hadoop-env', 'namenode_heapsize')
+namenode_opt_newsize = module_configs.get_property_value(module_name, 'hadoop-env', 'namenode_opt_newsize')
+namenode_opt_maxnewsize = module_configs.get_property_value(module_name, 'hadoop-env', 'namenode_opt_maxnewsize')
+namenode_opt_permsize = format_jvm_option_value(module_configs.get_property_value(module_name, 'hadoop-env', 'namenode_opt_permsize', '128m'))
+namenode_opt_maxpermsize = format_jvm_option_value(module_configs.get_property_value(module_name, 'hadoop-env', 'namenode_opt_maxpermsize', '256m'))
 
 jtnode_opt_newsize = "200m"
 jtnode_opt_maxnewsize = "200m"
 jtnode_heapsize =  "1024m"
 ttnode_heapsize = "1024m"
 
-dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
-nfsgateway_heapsize = config['configurations']['hadoop-env']['nfsgateway_heapsize']
-mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
-mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
-hadoop_env_sh_template = config['configurations']['hadoop-env']['content']
+dtnode_heapsize = module_configs.get_property_value(module_name, 'hadoop-env', 'dtnode_heapsize')
+nfsgateway_heapsize = module_configs.get_property_value(module_name, 'hadoop-env', 'nfsgateway_heapsize')
+mapred_pid_dir_prefix = module_configs.get_property_value(module_name, 'mapred-env', 'mapred_pid_dir_prefix', '/var/run/hadoop-mapreduce')
+mapred_log_dir_prefix = module_configs.get_property_value(module_name, 'mapred-env', 'mapred_log_dir_prefix', '/var/log/hadoop-mapreduce')
+hadoop_env_sh_template = module_configs.get_property_value(module_name, 'hadoop-env', 'content')
 
 #users and groups
-hbase_user = config['configurations']['hbase-env']['hbase_user']
+hbase_user = module_configs.get_property_value(module_name, 'hbase-env', 'hbase_user')
 smoke_user =  get_cluster_setting_value('smokeuser')
-gmetad_user = config['configurations']['ganglia-env']["gmetad_user"]
-gmond_user = config['configurations']['ganglia-env']["gmond_user"]
-tez_user = config['configurations']['tez-env']["tez_user"]
-oozie_user = config['configurations']['oozie-env']["oozie_user"]
-falcon_user = config['configurations']['falcon-env']["falcon_user"]
-ranger_user = config['configurations']['ranger-env']["ranger_user"]
-zeppelin_user = config['configurations']['zeppelin-env']["zeppelin_user"]
-zeppelin_group = config['configurations']['zeppelin-env']["zeppelin_group"]
+gmetad_user = module_configs.get_property_value(module_name, 'ganglia-env', 'gmetad_user')
+gmond_user = module_configs.get_property_value(module_name, 'ganglia-env', 'gmond_user')
+tez_user = module_configs.get_property_value(module_name, 'tez-env', 'tez_user')
+oozie_user = module_configs.get_property_value(module_name, 'oozie-env', 'oozie_user')
+falcon_user = module_configs.get_property_value(module_name, 'falcon-env', 'falcon_user')
+ranger_user = module_configs.get_property_value(module_name, 'ranger-env', 'ranger_user')
+zeppelin_user = module_configs.get_property_value(module_name, 'zeppelin-env', 'zeppelin_user')
+zeppelin_group = module_configs.get_property_value(module_name, 'zeppelin-env', 'zeppelin_group')
 
 user_group = get_cluster_setting_value('user_group')
 
-ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_hosts", [])
-namenode_host = default("/clusterHostInfo/namenode_hosts", [])
-hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
-oozie_servers = default("/clusterHostInfo/oozie_server", [])
-falcon_server_hosts = default("/clusterHostInfo/falcon_server_hosts", [])
-ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
-zeppelin_master_hosts = default("/clusterHostInfo/zeppelin_master_hosts", [])
+ganglia_server_hosts = execution_command._execution_command.__get_value("clusterHostInfo/ganglia_server_hosts", [])
+namenode_host = execution_command._execution_command.__get_value("clusterHostInfo/namenode_hosts", [])
+hbase_master_hosts = execution_command._execution_command.__get_value("clusterHostInfo/hbase_master_hosts", [])
+oozie_servers = execution_command._execution_command.__get_value("clusterHostInfo/oozie_server", [])
+falcon_server_hosts = execution_command._execution_command.__get_value("clusterHostInfo/falcon_server_hosts", [])
+ranger_admin_hosts = execution_command._execution_command.__get_value("clusterHostInfo/ranger_admin_hosts", [])
+zeppelin_master_hosts = execution_command._execution_command.__get_value("clusterHostInfo/zeppelin_master_hosts", [])
 
 # get the correct version to use for checking stack features
 version_for_stack_feature_checks = get_stack_feature_version(config)
@@ -198,7 +201,7 @@ version_for_stack_feature_checks = get_stack_feature_version(config)
 
 has_namenode = not len(namenode_host) == 0
 has_ganglia_server = not len(ganglia_server_hosts) == 0
-has_tez = 'tez-site' in config['configurations']
+has_tez = module_configs.get_property_value(module_name, 'tez-site', '') is not None
 has_hbase_masters = not len(hbase_master_hosts) == 0
 has_oozie_server = not len(oozie_servers) == 0
 has_falcon_server_hosts = not len(falcon_server_hosts) == 0
@@ -208,10 +211,10 @@ stack_supports_zk_security = check_stack_feature(StackFeature.SECURE_ZOOKEEPER,
 
 # HDFS High Availability properties
 dfs_ha_enabled = False
-dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.internal.nameservices', None)
+dfs_ha_nameservices = module_configs.get_property_value(module_name, 'hdfs-site', 'dfs.internal.nameservices')
 if dfs_ha_nameservices is None:
-  dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.nameservices', None)
-dfs_ha_namenode_ids = default(format("/configurations/hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
+  dfs_ha_nameservices = module_configs.get_property_value(module_name, 'hdfs-site', 'dfs.nameservices')
+dfs_ha_namenode_ids = module_configs.get_property_value(module_name, 'hdfs-site', 'dfs.ha.namenodes.{dfs_ha_nameservices}')
 if dfs_ha_namenode_ids:
   dfs_ha_namemodes_ids_list = dfs_ha_namenode_ids.split(",")
   dfs_ha_namenode_ids_array_len = len(dfs_ha_namemodes_ids_list)
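
[Note] There is a behavior change in the dfs_ha_namenode_ids lookup above: the old code expanded {dfs_ha_nameservices} with format() before the lookup, while the new call passes the braces literally. If get_property_value performs no interpolation of its own (an assumption), the key would need to be pre-formatted, e.g.:

    dfs_ha_namenode_ids = module_configs.get_property_value(
        module_name, 'hdfs-site',
        'dfs.ha.namenodes.{0}'.format(dfs_ha_nameservices))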
@@ -226,9 +229,9 @@ if has_namenode or dfs_type == 'HCFS':
 
 hbase_tmp_dir = "/tmp/hbase-hbase"
 
-proxyuser_group = default("/configurations/hadoop-env/proxyuser_group","users")
-ranger_group = config['configurations']['ranger-env']['ranger_group']
-dfs_cluster_administrators_group = config['configurations']['hdfs-site']["dfs.cluster.administrators"]
+proxyuser_group = module_configs.get_property_value(module_name, 'hadoop-env', 'proxyuser_group', 'users')
+ranger_group = module_configs.get_property_value(module_name, 'ranger-env', 'ranger_group')
+dfs_cluster_administrators_group = module_configs.get_property_value(module_name, 'hdfs-site', 'dfs.cluster.administrators')
 
 sysprep_skip_create_users_and_groups = get_cluster_setting_value('sysprep_skip_create_users_and_groups')
 ignore_groupsusers_create = get_cluster_setting_value('ignore_groupsusers_create')
@@ -238,14 +241,14 @@ smoke_user_dirs = format("/tmp/hadoop-{smoke_user},/tmp/hsperfdata_{smoke_user},
 if has_hbase_masters:
   hbase_user_dirs = format("/home/{hbase_user},/tmp/{hbase_user},/usr/bin/{hbase_user},/var/log/{hbase_user},{hbase_tmp_dir}")
 #repo params
-repo_info = config['hostLevelParams']['repoInfo']
-service_repo_info = default("/hostLevelParams/service_repo_info",None)
+repo_info = execution_command.get_repo_info()
+service_repo_info = execution_command.get_service_repo_info()
 
 user_to_groups_dict = {}
 
 #Append new user-group mapping to the dict
 try:
-  user_group_map = ast.literal_eval(config['clusterLevelParams']['user_groups'])
+  user_group_map = ast.literal_eval(execution_command.get_user_groups())
   for key in user_group_map.iterkeys():
     user_to_groups_dict[key] = user_group_map[key]
 except ValueError:
@@ -253,11 +256,11 @@ except ValueError:
 
 user_to_gid_dict = collections.defaultdict(lambda:user_group)
 
-user_list = json.loads(config['clusterLevelParams']['user_list'])
-group_list = json.loads(config['clusterLevelParams']['group_list'])
-host_sys_prepped = default("/ambariLevelParams/host_sys_prepped", False)
+user_list = json.loads(execution_command.get_user_list())
+group_list = json.loads(execution_command.get_group_list())
+host_sys_prepped = execution_command.is_host_system_prepared()
 
-tez_am_view_acls = config['configurations']['tez-site']["tez.am.view-acls"]
+tez_am_view_acls = module_configs.get_property_value(module_name, 'tez-site', 'tez.am.view-acls')
 override_uid = get_cluster_setting_value('override_uid')
 
 # if NN HA on secure clutser, access Zookeper securely
diff --git a/ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/shared_initialization.py b/ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/shared_initialization.py
index b63c1ff..3690821 100644
--- a/ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/shared_initialization.py
@@ -25,6 +25,8 @@ from copy import copy
 from resource_management.libraries.functions.version import compare_versions
 from resource_management import *
 from resource_management.core import shell
+from resource_management.libraries.execution_command import execution_command
+from resource_management.libraries.execution_command import module_configs
 
 def setup_users():
   """
@@ -154,11 +156,11 @@ def get_uid(user, return_existing=False):
   """
   import params
   user_str = str(user) + "_uid"
-  service_env = [ serviceEnv for serviceEnv in params.config['configurations'] if user_str in params.config['configurations'][serviceEnv]]
+  service_env = [ serviceEnv for serviceEnv in params.module_configs if user_str in params.module_configs.get_property_value(params.module_name, serviceEnv, "")]
 
-  if service_env and params.config['configurations'][service_env[0]][user_str]:
+  if service_env and params.module_configs.get_property_value(params.module_name, service_env[0], user_str):
     service_env_str = str(service_env[0])
-    uid = params.config['configurations'][service_env_str][user_str]
+    uid = params.module_configs.get_property_value(params.module_name, service_env_str, user_str)
     if len(service_env) > 1:
       Logger.warning("Multiple values found for %s, using %s"  % (user_str, uid))
     return uid
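
[Note] For the rewritten comprehension above to work, ModuleConfigs has to support iteration over config types as well as membership tests. A minimal sketch of that protocol — an assumption about the library, not its actual code:

    class ModuleConfigsSketch(object):
      def __init__(self, configs):
        self._configs = configs  # {config_type: {property: value}}

      def __iter__(self):
        # yields config-type names such as 'hadoop-env'
        return iter(self._configs)

      def __contains__(self, config_type):
        # enables checks like 'sqoop-env' in module_configs (used below)
        return config_type in self._configs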
diff --git a/ambari-server/src/main/resources/stack-hooks/before-INSTALL/scripts/params.py b/ambari-server/src/main/resources/stack-hooks/before-INSTALL/scripts/params.py
index 63e29b7..3004f1f 100644
--- a/ambari-server/src/main/resources/stack-hooks/before-INSTALL/scripts/params.py
+++ b/ambari-server/src/main/resources/stack-hooks/before-INSTALL/scripts/params.py
@@ -24,63 +24,68 @@ from resource_management.libraries.script.script import Script
 from resource_management.libraries.functions import default, format
 from resource_management.libraries.functions.expect import expect
 from resource_management.libraries.functions.cluster_settings import get_cluster_setting_value
+from resource_management.libraries.execution_command import execution_command
+from resource_management.libraries.execution_command import module_configs
 
 config = Script.get_config()
+execution_command = Script.get_execution_command()
+module_configs = Script.get_module_configs()
+module_name = execution_command.get_module_name()
 tmp_dir = Script.get_tmp_dir()
 sudo = AMBARI_SUDO_BINARY
 
-stack_version_unformatted = config['clusterLevelParams']['stack_version']
-agent_stack_retry_on_unavailability = config['ambariLevelParams']['agent_stack_retry_on_unavailability']
-agent_stack_retry_count = expect("/ambariLevelParams/agent_stack_retry_count", int)
+stack_version_unformatted = execution_command.get_mpack_version()
+agent_stack_retry_on_unavailability = execution_command.check_agent_stack_want_retry_on_unavailability()
+agent_stack_retry_count = execution_command.get_agent_stack_retry_count_on_unavailability()
 stack_version_formatted = format_stack_version(stack_version_unformatted)
 
 #users and groups
-hbase_user = config['configurations']['hbase-env']['hbase_user']
+hbase_user = module_configs.get_property_value(module_name, 'hbase-env', 'hbase_user')
 smoke_user = get_cluster_setting_value('smokeuser')
-gmetad_user = config['configurations']['ganglia-env']["gmetad_user"]
-gmond_user = config['configurations']['ganglia-env']["gmond_user"]
-tez_user = config['configurations']['tez-env']["tez_user"]
+gmetad_user = module_configs.get_property_value(module_name, 'ganglia-env', 'gmetad_user')
+gmond_user = module_configs.get_property_value(module_name, 'ganglia-env', 'gmond_user')
+tez_user = module_configs.get_property_value(module_name, 'tez-env', 'tez_user')
 
 user_group = get_cluster_setting_value('user_group')
-proxyuser_group = default("/configurations/hadoop-env/proxyuser_group","users")
+proxyuser_group = module_configs.get_property_value(module_name, 'hadoop-env', 'proxyuser_group', 'users')
 
-hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
+hdfs_log_dir_prefix = module_configs.get_property_value(module_name, 'hadoop-env', 'hdfs_log_dir_prefix')
 
 # repo templates
 repo_rhel_suse =  get_cluster_setting_value('repo_suse_rhel_template')
 repo_ubuntu =  get_cluster_setting_value('repo_ubuntu_template')
 
 #hosts
-hostname = config['agentLevelParams']['hostname']
-ambari_server_hostname = config['ambariLevelParams']['ambari_server_host']
-rm_host = default("/clusterHostInfo/resourcemanager_hosts", [])
-slave_hosts = default("/clusterHostInfo/datanode_hosts", [])
-oozie_servers = default("/clusterHostInfo/oozie_server", [])
-hcat_server_hosts = default("/clusterHostInfo/webhcat_server_hosts", [])
-hive_server_host =  default("/clusterHostInfo/hive_server_hosts", [])
-hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
-hs_host = default("/clusterHostInfo/historyserver_hosts", [])
-jtnode_host = default("/clusterHostInfo/jtnode_hosts", [])
-namenode_host = default("/clusterHostInfo/namenode_hosts", [])
-zk_hosts = default("/clusterHostInfo/zookeeper_server_hosts", [])
-ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_hosts", [])
-storm_server_hosts = default("/clusterHostInfo/nimbus_hosts", [])
-falcon_host =  default('/clusterHostInfo/falcon_server_hosts', [])
-
-has_sqoop_client = 'sqoop-env' in config['configurations']
+hostname = execution_command.get_host_name()
+ambari_server_hostname = execution_command.get_ambari_server_host()
+rm_host = execution_command._execution_command.__get_value("clusterHostInfo/resourcemanager_hosts", [])
+slave_hosts = execution_command._execution_command.__get_value("clusterHostInfo/datanode_hosts", [])
+oozie_servers = execution_command._execution_command.__get_value("clusterHostInfo/oozie_server", [])
+hcat_server_hosts = execution_command._execution_command.__get_value("clusterHostInfo/webhcat_server_hosts", [])
+hive_server_host =  execution_command._execution_command.__get_value("clusterHostInfo/hive_server_hosts", [])
+hbase_master_hosts = execution_command._execution_command.__get_value("clusterHostInfo/hbase_master_hosts", [])
+hs_host = execution_command._execution_command.__get_value("clusterHostInfo/historyserver_hosts", [])
+jtnode_host = execution_command._execution_command.__get_value("clusterHostInfo/jtnode_hosts", [])
+namenode_host = execution_command._execution_command.__get_value("clusterHostInfo/namenode_hosts", [])
+zk_hosts = execution_command._execution_command.__get_value("clusterHostInfo/zookeeper_server_hosts", [])
+ganglia_server_hosts = execution_command._execution_command.__get_value("clusterHostInfo/ganglia_server_hosts", [])
+storm_server_hosts = execution_command._execution_command.__get_value("clusterHostInfo/nimbus_hosts", [])
+falcon_host = execution_command._execution_command.__get_value("clusterHostInfo/falcon_server_hosts", [])
+
+has_sqoop_client = 'sqoop-env' in module_configs
 has_namenode = not len(namenode_host) == 0
 has_hs = not len(hs_host) == 0
 has_resourcemanager = not len(rm_host) == 0
 has_slaves = not len(slave_hosts) == 0
 has_oozie_server = not len(oozie_servers)  == 0
-has_hcat_server_host = not len(hcat_server_hosts)  == 0
-has_hive_server_host = not len(hive_server_host)  == 0
+has_hcat_server_host = not len(hcat_server_hosts) == 0
+has_hive_server_host = not len(hive_server_host) == 0
 has_hbase_masters = not len(hbase_master_hosts) == 0
 has_zk_host = not len(zk_hosts) == 0
 has_ganglia_server = not len(ganglia_server_hosts) == 0
 has_storm_server = not len(storm_server_hosts) == 0
 has_falcon_server = not len(falcon_host) == 0
-has_tez = 'tez-site' in config['configurations']
+has_tez = module_configs.get_property_value(module_name, 'tez-site', '') is not None
 
 is_namenode_master = hostname in namenode_host
 is_jtnode_master = hostname in jtnode_host
@@ -97,20 +102,20 @@ hbase_tmp_dir = "/tmp/hbase-hbase"
 security_enabled = get_cluster_setting_value('security_enabled')
 
 #java params
-java_home = config['ambariLevelParams']['java_home']
+java_home = execution_command.get_java_home()
 artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
-jdk_name = default("/ambariLevelParams/jdk_name", None) # None when jdk is already installed by user
-jce_policy_zip = default("/ambariLevelParams/jce_name", None) # None when jdk is already installed by user
-jce_location = config['ambariLevelParams']['jdk_location']
-jdk_location = config['ambariLevelParams']['jdk_location']
+jdk_name = execution_command.get_jdk_name() # None when jdk is already installed by user
+jce_policy_zip = execution_command.get_jce_name() # None when jdk is already installed by user
+jce_location = execution_command.get_jdk_location()
+jdk_location = execution_command.get_jdk_location()
 ignore_groupsusers_create = get_cluster_setting_value('ignore_groupsusers_create')
-host_sys_prepped = default("/ambariLevelParams/host_sys_prepped", False)
+host_sys_prepped = execution_command.is_host_system_prepared()
 
 smoke_user_dirs = format("/tmp/hadoop-{smoke_user},/tmp/hsperfdata_{smoke_user},/home/{smoke_user},/tmp/{smoke_user},/tmp/sqoop-{smoke_user}")
 if has_hbase_masters:
   hbase_user_dirs = format("/home/{hbase_user},/tmp/{hbase_user},/usr/bin/{hbase_user},/var/log/{hbase_user},{hbase_tmp_dir}")
 #repo params
-repo_info = config['hostLevelParams']['repoInfo']
-service_repo_info = default("/hostLevelParams/service_repo_info",None)
+repo_info = execution_command.get_repo_info()
+service_repo_info = execution_command.get_service_repo_info()
 
-repo_file = default("/repositoryFile", None)
+repo_file = execution_command._execution_command.__get_value("repositoryFile")
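
[Note] The repeated _execution_command.__get_value(...) expressions in this file reach into a private method (see the name-mangling caveat noted earlier). Until public getters exist, a thin hypothetical helper would keep the call sites uniform — this assumes _execution_command is itself an ExecutionCommand instance, which may not match the library's actual layout:

    def get_cluster_host_list(exec_cmd, key, default_value=None):
      # hypothetical wrapper; the private method is reachable only via its mangled name
      if default_value is None:
        default_value = []
      getter = getattr(exec_cmd, '_ExecutionCommand__get_value', None)
      return getter('clusterHostInfo/' + key, default_value) if getter else default_value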
diff --git a/ambari-server/src/main/resources/stack-hooks/before-START/scripts/custom_extensions.py b/ambari-server/src/main/resources/stack-hooks/before-START/scripts/custom_extensions.py
index 04299ba..ab9646b 100644
--- a/ambari-server/src/main/resources/stack-hooks/before-START/scripts/custom_extensions.py
+++ b/ambari-server/src/main/resources/stack-hooks/before-START/scripts/custom_extensions.py
@@ -21,9 +21,10 @@ import os
 
 from resource_management.core.resources import Directory
 from resource_management.core.resources import Execute
-from resource_management.libraries.functions import default
 from resource_management.libraries.script.script import Script
 from resource_management.libraries.functions import format
+from resource_management.libraries.execution_command import execution_command
+from resource_management.libraries.execution_command import module_configs
 
 
 DEFAULT_HADOOP_HDFS_EXTENSION_DIR = "/hdp/ext/{0}/hadoop"
@@ -41,10 +42,10 @@ def setup_extensions():
   import params
 
   # Hadoop Custom extensions
-  hadoop_custom_extensions_enabled = default("/configurations/core-site/hadoop.custom-extensions.enabled", False)
-  hadoop_custom_extensions_services = default("/configurations/core-site/hadoop.custom-extensions.services", "")
-  hadoop_custom_extensions_owner = default("/configurations/core-site/hadoop.custom-extensions.owner", params.hdfs_user)
-  hadoop_custom_extensions_hdfs_dir = get_config_formatted_value(default("/configurations/core-site/hadoop.custom-extensions.root",
+  hadoop_custom_extensions_enabled = params.module_configs.get_property_value(params.module_name, 'core-site', 'hadoop.custom-extensions.enabled', False)
+  hadoop_custom_extensions_services = params.module_configs.get_property_value(params.module_name, 'core-site', 'hadoop.custom-extensions.services', "")
+  hadoop_custom_extensions_owner = params.module_configs.get_property_value(params.module_name, 'core-site', 'hadoop.custom-extensions.owner', params.hdfs_user)
+  hadoop_custom_extensions_hdfs_dir = get_config_formatted_value(params.module_configs.get_property_value(params.module_name, 'core-site', 'hadoop.custom-extensions.root',
                                                  DEFAULT_HADOOP_HDFS_EXTENSION_DIR.format(params.major_stack_version)))
   hadoop_custom_extensions_services = [ service.strip().upper() for service in hadoop_custom_extensions_services.split(",") ]
   hadoop_custom_extensions_services.append("YARN")
@@ -70,14 +71,14 @@ def setup_hbase_extensions():
   import params
 
   # HBase Custom extensions
-  hbase_custom_extensions_enabled = default("/configurations/hbase-site/hbase.custom-extensions.enabled", False)
-  hbase_custom_extensions_owner = default("/configurations/hbase-site/hbase.custom-extensions.owner", params.hdfs_user)
-  hbase_custom_extensions_hdfs_dir = get_config_formatted_value(default("/configurations/hbase-site/hbase.custom-extensions.root",
+  hbase_custom_extensions_enabled = params.module_configs.get_property_value(params.module_name, 'hbase-site', 'hbase.custom-extensions.enabled', False)
+  hbase_custom_extensions_owner = params.module_configs.get_property_value(params.module_name, 'hbase-site', 'hbase.custom-extensions.owner', params.hdfs_user)
+  hbase_custom_extensions_hdfs_dir = get_config_formatted_value(params.module_configs.get_property_value(params.module_name, 'hbase-site', 'hbase.custom-extensions.root',
                                                 DEFAULT_HADOOP_HBASE_EXTENSION_DIR.format(params.major_stack_version)))
   hbase_custom_extensions_local_dir = "{0}/current/ext/hbase".format(Script.get_stack_root())
 
   impacted_components = ['HBASE_MASTER', 'HBASE_REGIONSERVER', 'PHOENIX_QUERY_SERVER'];
-  role = params.config.get('role','')
+  role = params.execution_command.get_component_type()
 
   if role in impacted_components:
     clean_extensions(hbase_custom_extensions_local_dir)
@@ -90,14 +91,14 @@ def setup_hbase_extensions():
 def setup_extensions_hive():
   import params
 
-  hive_custom_extensions_enabled = default("/configurations/hive-site/hive.custom-extensions.enabled", False)
-  hive_custom_extensions_owner = default("/configurations/hive-site/hive.custom-extensions.owner", params.hdfs_user)
+  hive_custom_extensions_enabled = params.module_configs.get_property_value(params.module_name, 'hive-site', 'hive.custom-extensions.enabled', False)
+  hive_custom_extensions_owner = params.module_configs.get_property_value(params.module_name, 'hive-site', 'hive.custom-extensions.owner', params.hdfs_user)
   hive_custom_extensions_hdfs_dir = DEFAULT_HADOOP_HIVE_EXTENSION_DIR.format(params.major_stack_version)
 
   hive_custom_extensions_local_dir = "{0}/current/ext/hive".format(Script.get_stack_root())
 
-  impacted_components = ['HIVE_SERVER', 'HIVE_CLIENT'];
-  role = params.config.get('role','')
+  impacted_components = ['HIVE_SERVER', 'HIVE_CLIENT']
+  role = params.execution_command.get_component_type()
 
   # Run copying for HIVE_SERVER and HIVE_CLIENT
   if params.current_service == 'HIVE' and role in impacted_components:
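
[Note] A usage detail on the pattern in this file: the inline default argument replaces the old default("/configurations/...") helper, and boolean-ish properties typically arrive from command.json as strings, so truthiness checks can surprise ("false" is a truthy string). A guarded usage sketch under that assumption:

    enabled = params.module_configs.get_property_value(params.module_name, 'hive-site',
                                                       'hive.custom-extensions.enabled', False)
    if str(enabled).lower() == 'true':
      setup_extensions_hive()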
diff --git a/ambari-server/src/main/resources/stack-hooks/before-START/scripts/params.py b/ambari-server/src/main/resources/stack-hooks/before-START/scripts/params.py
index eeeea5e..40e95b6 100644
--- a/ambari-server/src/main/resources/stack-hooks/before-START/scripts/params.py
+++ b/ambari-server/src/main/resources/stack-hooks/before-START/scripts/params.py
@@ -22,7 +22,7 @@ import os
 from resource_management.libraries.functions import conf_select
 from resource_management.libraries.functions import stack_select
 from resource_management.libraries.functions import default
-from resource_management.libraries.functions import format_jvm_option
+from resource_management.libraries.functions.format_jvm_option import format_jvm_option_value
 from resource_management.libraries.functions import format
 from resource_management.libraries.functions.version import format_stack_version, compare_versions, get_major_version
 from ambari_commons.os_check import OSCheck
@@ -33,10 +33,14 @@ from resource_management.libraries.resources.hdfs_resource import HdfsResource
 from resource_management.libraries.functions.stack_features import check_stack_feature
 from resource_management.libraries.functions.stack_features import get_stack_feature_version
 from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.execution_command import execution_command
+from resource_management.libraries.execution_command import module_configs
 from ambari_commons.constants import AMBARI_SUDO_BINARY
 from resource_management.libraries.functions.cluster_settings import get_cluster_setting_value
 
-config = Script.get_config()
+execution_command = Script.get_execution_command()
+module_configs = Script.get_module_configs()
+module_name = execution_command.get_module_name()
 tmp_dir = Script.get_tmp_dir()
 artifact_dir = tmp_dir + "/AMBARI-artifacts"
 
@@ -46,7 +50,7 @@ stack_supports_hadoop_custom_extensions = check_stack_feature(StackFeature.HADOO
 sudo = AMBARI_SUDO_BINARY
 
 # Global flag enabling or disabling the sysprep feature
-host_sys_prepped = default("/ambariLevelParams/host_sys_prepped", False)
+host_sys_prepped = execution_command.is_host_system_prepared()
 
 # Whether to skip copying fast-hdfs-resource.jar to /var/lib/ambari-agent/lib/
 # This is required if tarballs are going to be copied to HDFS, so set to False
@@ -55,19 +59,17 @@ sysprep_skip_copy_fast_jar_hdfs = host_sys_prepped and get_cluster_setting_value
 # Whether to skip setting up the unlimited key JCE policy
 sysprep_skip_setup_jce = host_sys_prepped and get_cluster_setting_value('sysprep_skip_setup_jce')
 
-stack_version_unformatted = config['clusterLevelParams']['stack_version']
+stack_version_unformatted = execution_command.get_mpack_version()
 stack_version_formatted = format_stack_version(stack_version_unformatted)
 major_stack_version = get_major_version(stack_version_formatted)
 
-dfs_type = default("/commandParams/dfs_type", "")
+dfs_type = execution_command.get_dfs_type()
 hadoop_conf_dir = "/etc/hadoop/conf"
-component_list = default("/localComponents", [])
+component_list = execution_command._execution_command.__get_value("localComponents", [])
 
-hdfs_tmp_dir = default("/configurations/hadoop-env/hdfs_tmp_dir", "/tmp")
+hdfs_tmp_dir = module_configs.get_property_value(module_name, 'hadoop-env', 'hdfs_tmp_dir', '/tmp')
 
-hadoop_metrics2_properties_content = None
-if 'hadoop-metrics2.properties' in config['configurations']:
-  hadoop_metrics2_properties_content = config['configurations']['hadoop-metrics2.properties']['content']
+hadoop_metrics2_properties_content = module_configs.get_property_value(module_name, 'hadoop-metrics2.properties', 'content')
 
 hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
 hadoop_lib_home = stack_select.get_hadoop_dir("lib")
@@ -77,52 +79,51 @@ mapreduce_libs_path = "/usr/hdp/current/hadoop-mapreduce-client/*"
 hadoop_home = stack_select.get_hadoop_dir("home")
 create_lib_snappy_symlinks = False
   
-current_service = config['serviceName']
+current_service = module_name
 
 #security params
 security_enabled = get_cluster_setting_value('security_enabled')
 
-ambari_server_resources_url = default("/ambariLevelParams/jdk_location", None)
-if ambari_server_resources_url is not None and ambari_server_resources_url.endswith('/'):
+ambari_server_resources_url = execution_command.get_jdk_location()
+if ambari_server_resources_url and ambari_server_resources_url.endswith('/'):
   ambari_server_resources_url = ambari_server_resources_url[:-1]
 
 # Unlimited key JCE policy params
-jce_policy_zip = default("/ambariLevelParams/jce_name", None) # None when jdk is already installed by user
-unlimited_key_jce_required = default("/componentLevelParams/unlimited_key_jce_required", False)
-jdk_name = default("/ambariLevelParams/jdk_name", None)
-java_home = default("/ambariLevelParams/java_home", None)
+jce_policy_zip = execution_command.get_jce_name() # None when jdk is already installed by user
+unlimited_key_jce_required = execution_command.check_unlimited_key_jce_required()
+jdk_name = execution_command.get_jdk_name()
+java_home = execution_command.get_java_home()
 java_exec = "{0}/bin/java".format(java_home) if java_home is not None else "/bin/java"
 
 #users and groups
-has_hadoop_env = 'hadoop-env' in config['configurations']
-mapred_user = config['configurations']['mapred-env']['mapred_user']
-hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-yarn_user = config['configurations']['yarn-env']['yarn_user']
+has_hadoop_env = 'hadoop-env' in module_configs
+mapred_user = module_configs.get_property_value(module_name, 'mapred-env', 'mapred_user')
+hdfs_user = module_configs.get_property_value(module_name, 'hadoop-env', 'hdfs_user')
+yarn_user = module_configs.get_property_value(module_name, 'yarn-env', 'yarn_user')
 
 user_group = get_cluster_setting_value('user_group')
 
 #hosts
-hostname = config['agentLevelParams']['hostname']
-ambari_server_hostname = config['ambariLevelParams']['ambari_server_host']
-rm_host = default("/clusterHostInfo/resourcemanager_hosts", [])
-slave_hosts = default("/clusterHostInfo/datanode_hosts", [])
-oozie_servers = default("/clusterHostInfo/oozie_server", [])
-hcat_server_hosts = default("/clusterHostInfo/webhcat_server_hosts", [])
-hive_server_host =  default("/clusterHostInfo/hive_server_hosts", [])
-hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
-hs_host = default("/clusterHostInfo/historyserver_hosts", [])
-jtnode_host = default("/clusterHostInfo/jtnode_hosts", [])
-namenode_host = default("/clusterHostInfo/namenode_hosts", [])
-zk_hosts = default("/clusterHostInfo/zookeeper_server_hosts", [])
-ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_hosts", [])
-cluster_name = config["clusterName"]
+hostname = execution_command.get_host_name()
+ambari_server_hostname = execution_command.get_ambari_server_host()
+rm_host = execution_command._execution_command.__get_value("clusterHostInfo/resourcemanager_hosts", [])
+slave_hosts = execution_command._execution_command.__get_value("clusterHostInfo/datanode_hosts", [])
+oozie_servers = execution_command._execution_command.__get_value("clusterHostInfo/oozie_server", [])
+hcat_server_hosts = execution_command._execution_command.__get_value("clusterHostInfo/webhcat_server_hosts", [])
+hive_server_host =  execution_command._execution_command.__get_value("clusterHostInfo/hive_server_hosts", [])
+hbase_master_hosts = execution_command._execution_command.__get_value("clusterHostInfo/hbase_master_hosts", [])
+hs_host = execution_command._execution_command.__get_value("clusterHostInfo/historyserver_hosts", [])
+jtnode_host = execution_command._execution_command.__get_value("clusterHostInfo/jtnode_hosts", [])
+namenode_host = execution_command._execution_command.__get_value("clusterHostInfo/namenode_hosts", [])
+zk_hosts = execution_command._execution_command.__get_value("clusterHostInfo/zookeeper_server_hosts", [])
+ganglia_server_hosts = execution_command._execution_command.__get_value("clusterHostInfo/ganglia_server_hosts", [])
+cluster_name = execution_command.get_cluster_name()
 set_instanceId = "false"
-if 'cluster-env' in config['configurations'] and \
-    'metrics_collector_external_hosts' in config['configurations']['cluster-env']:
-  ams_collector_hosts = config['configurations']['cluster-env']['metrics_collector_external_hosts']
+ams_collector_hosts = module_configs.get_property_value(module_name, 'cluster-env', 'metrics_collector_external_hosts')
+if ams_collector_hosts:
   set_instanceId = "true"
 else:
-  ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", []))
+  ams_collector_hosts = ",".join(execution_command._execution_command.__get_value("clusterHostInfo/metrics_collector_hosts", []))
 
 has_namenode = not len(namenode_host) == 0
 has_resourcemanager = not len(rm_host) == 0
@@ -145,40 +146,35 @@ is_slave = hostname in slave_hosts
 if has_ganglia_server:
   ganglia_server_host = ganglia_server_hosts[0]
 
-metric_collector_port = None
+metric_collector_port = module_configs.get_property_value(module_name, 
'cluster-env', 'metrics_collector_external_port')
 if has_metric_collector:
-  if 'cluster-env' in config['configurations'] and \
-      'metrics_collector_external_port' in 
config['configurations']['cluster-env']:
-    metric_collector_port = 
config['configurations']['cluster-env']['metrics_collector_external_port']
-  else:
-    metric_collector_web_address = 
default("/configurations/ams-site/timeline.metrics.service.webapp.address", 
"0.0.0.0:6188")
+  if not metric_collector_port:
+    metric_collector_web_address = 
module_configs.get_property_value(module_name, 'ams-env', 
'timeline.metrics.service.webapp.address', '0.0.0.0:6188')
     if metric_collector_web_address.find(':') != -1:
       metric_collector_port = metric_collector_web_address.split(':')[1]
     else:
       metric_collector_port = '6188'
-  if default("/configurations/ams-site/timeline.metrics.service.http.policy", "HTTP_ONLY") == "HTTPS_ONLY":
+  if module_configs.get_property_value(module_name, 'ams-env', 'timeline.metrics.service.http.policy', 'HTTP_ONLY') == "HTTPS_ONLY":
     metric_collector_protocol = 'https'
   else:
     metric_collector_protocol = 'http'
-  metric_truststore_path= default("/configurations/ams-ssl-client/ssl.client.truststore.location", "")
-  metric_truststore_type= default("/configurations/ams-ssl-client/ssl.client.truststore.type", "")
-  metric_truststore_password= default("/configurations/ams-ssl-client/ssl.client.truststore.password", "")
+  metric_truststore_path= module_configs.get_property_value(module_name, 'ams-ssl-client', 'ams-ssl-client/ssl.client.truststore.location', '')
+  metric_truststore_type= module_configs.get_property_value(module_name, 'ams-ssl-client', 'ams-ssl-client/ssl.client.truststore.type', '')
+  metric_truststore_password= module_configs.get_property_value(module_name, 'ams-ssl-client', 'ssl.client.truststore.password', '')
 
   pass
-metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
-metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
+metrics_report_interval = module_configs.get_property_value(module_name, 'ams-site', 'timeline.metrics.sink.report.interval', 60)
+metrics_collection_period = module_configs.get_property_value(module_name, 'ams-site', 'timeline.metrics.sink.collection.period', 10)
 
-host_in_memory_aggregation = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation", True)
-host_in_memory_aggregation_port = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation.port", 61888)
+host_in_memory_aggregation = module_configs.get_property_value(module_name, 'ams-site', 'timeline.metrics.host.inmemory.aggregation', True)
+host_in_memory_aggregation_port = module_configs.get_property_value(module_name, 'ams-site', 'timeline.metrics.host.inmemory.aggregation.port', 61888)
 
 # Cluster Zookeeper quorum
-zookeeper_quorum = None
+zookeeper_quorum = module_configs.get_property_value(module_name, 'zoo.cfg', 'clientPort')
 if has_zk_host:
-  if 'zoo.cfg' in config['configurations'] and 'clientPort' in config['configurations']['zoo.cfg']:
-    zookeeper_clientPort = config['configurations']['zoo.cfg']['clientPort']
-  else:
+  if not zookeeper_quorum:
     zookeeper_clientPort = '2181'
-  zookeeper_quorum = (':' + zookeeper_clientPort + ',').join(config['clusterHostInfo']['zookeeper_server_hosts'])
+  zookeeper_quorum = (':' + zookeeper_clientPort + ',').join(execution_command._execution_command.__get_value("clusterHostInfo/zookeeper_server_hosts"))
   # last port config
   zookeeper_quorum += ':' + zookeeper_clientPort
 
@@ -189,15 +185,15 @@ if has_namenode or dfs_type == 'HCFS':
   hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
   task_log4j_properties_location = os.path.join(hadoop_conf_dir, "task-log4j.properties")
 
-hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
-hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
+hadoop_pid_dir_prefix = module_configs.get_property_value(module_name, 'hadoop-env', 'hadoop_pid_dir_prefix')
+hdfs_log_dir_prefix = module_configs.get_property_value(module_name, 'hadoop-env', 'hdfs_log_dir_prefix')
 hbase_tmp_dir = "/tmp/hbase-hbase"
 #db params
 oracle_driver_symlink_url = format("{ambari_server_resources_url}/oracle-jdbc-driver.jar")
 mysql_driver_symlink_url = format("{ambari_server_resources_url}/mysql-jdbc-driver.jar")
 
-if has_namenode and 'rca_enabled' in config['configurations']['hadoop-env']:
-  rca_enabled =  config['configurations']['hadoop-env']['rca_enabled']
+if has_namenode:
+  rca_enabled = module_configs.get_property_value(module_name, 'hadoop-env', 'rca_enabled', False)
 else:
   rca_enabled = False
 rca_disabled_prefix = "###"
@@ -210,64 +206,58 @@ else:
 
 jsvc_path = "/usr/lib/bigtop-utils"
 
-hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
-namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
-namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
-namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
-namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
-namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")
+hadoop_heapsize = module_configs.get_property_value(module_name, 'hadoop-env', 'hadoop_heapsize')
+namenode_heapsize = module_configs.get_property_value(module_name, 'hadoop-env', 'namenode_heapsize')
+namenode_opt_newsize = module_configs.get_property_value(module_name, 'hadoop-env', 'namenode_opt_newsize')
+namenode_opt_maxnewsize = module_configs.get_property_value(module_name, 'hadoop-env', 'namenode_opt_maxnewsize')
+namenode_opt_permsize = format_jvm_option_value(module_configs.get_property_value(module_name, 'hadoop-env', 'namenode_opt_permsize', '128m'))
+namenode_opt_maxpermsize = format_jvm_option_value(module_configs.get_property_value(module_name, 'hadoop-env', 'namenode_opt_maxpermsize', '256m'))
 
 jtnode_opt_newsize = "200m"
 jtnode_opt_maxnewsize = "200m"
 jtnode_heapsize =  "1024m"
 ttnode_heapsize = "1024m"
 
-dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
-mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
-mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
+dtnode_heapsize = module_configs.get_property_value(module_name, 'hadoop-env', 'dtnode_heapsize')
+mapred_pid_dir_prefix = module_configs.get_property_value(module_name, 'mapred-env', 'mapred_pid_dir_prefix', '/var/run/hadoop-mapreduce')
+mapred_log_dir_prefix = module_configs.get_property_value(module_name, 'mapred-env', 'mapred_log_dir_prefix', '/var/log/hadoop-mapreduce')
 
 #log4j.properties
 
-yarn_log_dir_prefix = default("/configurations/yarn-env/yarn_log_dir_prefix","/var/log/hadoop-yarn")
+yarn_log_dir_prefix = module_configs.get_property_value(module_name, 'yarn-env', 'yarn_log_dir_prefix', '/var/log/hadoop-yarn')
 
-dfs_hosts = default('/configurations/hdfs-site/dfs.hosts', None)
+dfs_hosts = module_configs.get_property_value(module_name, 'hdfs-site', 'dfs.hosts')
 
 # Hdfs log4j settings
-hadoop_log_max_backup_size = default('configurations/hdfs-log4j/hadoop_log_max_backup_size', 256)
-hadoop_log_number_of_backup_files = default('configurations/hdfs-log4j/hadoop_log_number_of_backup_files', 10)
-hadoop_security_log_max_backup_size = default('configurations/hdfs-log4j/hadoop_security_log_max_backup_size', 256)
-hadoop_security_log_number_of_backup_files = default('configurations/hdfs-log4j/hadoop_security_log_number_of_backup_files', 20)
+hadoop_log_max_backup_size = module_configs.get_property_value(module_name, 'hdfs-log4j', 'hadoop_log_max_backup_size', 256)
+hadoop_log_number_of_backup_files = module_configs.get_property_value(module_name, 'hdfs-log4j', 'hadoop_log_number_of_backup_files', 10)
+hadoop_security_log_max_backup_size = module_configs.get_property_value(module_name, 'hdfs-log4j', 'hadoop_security_log_max_backup_size', 256)
+hadoop_security_log_number_of_backup_files = module_configs.get_property_value(module_name, 'hdfs-log4j', 'hadoop_security_log_number_of_backup_files', 20)
 
 # Yarn log4j settings
-yarn_rm_summary_log_max_backup_size = default('configurations/yarn-log4j/yarn_rm_summary_log_max_backup_size', 256)
-yarn_rm_summary_log_number_of_backup_files = default('configurations/yarn-log4j/yarn_rm_summary_log_number_of_backup_files', 20)
+yarn_rm_summary_log_max_backup_size = module_configs.get_property_value(module_name, 'yarn-log4j', 'yarn_rm_summary_log_max_backup_size', 256)
+yarn_rm_summary_log_number_of_backup_files = module_configs.get_property_value(module_name, 'yarn-log4j', 'yarn_rm_summary_log_number_of_backup_files', 20)
 
 #log4j.properties
-if (('hdfs-log4j' in config['configurations']) and ('content' in config['configurations']['hdfs-log4j'])):
-  log4j_props = config['configurations']['hdfs-log4j']['content']
-  if (('yarn-log4j' in config['configurations']) and ('content' in config['configurations']['yarn-log4j'])):
-    log4j_props += config['configurations']['yarn-log4j']['content']
-else:
-  log4j_props = None
+log4j_props = module_configs.get_property_value(module_name, 'hdfs-log4j', 'content')
+if log4j_props:
+  log4j_props += module_configs.get_property_value(module_name, 'yarn-log4j', 'content')
 
-refresh_topology = False
-command_params = config["commandParams"] if "commandParams" in config else None
-if command_params is not None:
-  refresh_topology = bool(command_params["refresh_topology"]) if "refresh_topology" in command_params else False
+refresh_topology = execution_command.need_refresh_topology()
 
-ambari_java_home = default("/commandParams/ambari_java_home", None)
-ambari_jdk_name = default("/commandParams/ambari_jdk_name", None)
-ambari_jce_name = default("/commandParams/ambari_jce_name", None)
+ambari_java_home = execution_command.get_ambari_java_home()
+ambari_jdk_name = execution_command.get_ambari_jdk_name()
+ambari_jce_name = execution_command.get_ambari_jce_name()
   
 ambari_libs_dir = "/var/lib/ambari-agent/lib"
-is_webhdfs_enabled = config['configurations']['hdfs-site']['dfs.webhdfs.enabled']
-default_fs = config['configurations']['core-site']['fs.defaultFS']
+is_webhdfs_enabled = module_configs.get_property_value(module_name, 'hdfs-site', 'dfs.webhdfs.enabled')
+default_fs = module_configs.get_property_value(module_name, 'core-site', 'fs.defaultFS')
 
 #host info
-all_hosts = default("/clusterHostInfo/all_hosts", [])
-all_racks = default("/clusterHostInfo/all_racks", [])
-all_ipv4_ips = default("/clusterHostInfo/all_ipv4_ips", [])
-slave_hosts = default("/clusterHostInfo/datanode_hosts", [])
+all_hosts = execution_command._execution_command.__get_value("clusterHostInfo/all_hosts", [])
+all_racks = execution_command._execution_command.__get_value("clusterHostInfo/all_racks", [])
+all_ipv4_ips = execution_command._execution_command.__get_value("clusterHostInfo/all_ipv4_ips", [])
+slave_hosts = execution_command._execution_command.__get_value("clusterHostInfo/datanode_hosts", [])
 
 #topology files
 net_topology_script_file_path = "/etc/hadoop/conf/topology_script.py"
@@ -276,14 +266,14 @@ net_topology_mapping_data_file_name = 'topology_mappings.data'
 net_topology_mapping_data_file_path = os.path.join(net_topology_script_dir, net_topology_mapping_data_file_name)
 
 #Added logic to create /tmp and /user directory for HCFS stack.  
-has_core_site = 'core-site' in config['configurations']
-hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+has_core_site = 'core-site' in module_configs
+hdfs_user_keytab = module_configs.get_property_value(module_name, 'hadoop-env', 'hdfs_user_keytab')
 kinit_path_local = get_kinit_path()
-stack_version_unformatted = config['clusterLevelParams']['stack_version']
+stack_version_unformatted = execution_command.get_mpack_version()
 stack_version_formatted = format_stack_version(stack_version_unformatted)
 hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
-hdfs_principal_name = default('/configurations/hadoop-env/hdfs_principal_name', None)
-hdfs_site = config['configurations']['hdfs-site']
+hdfs_principal_name = module_configs.get_property_value(module_name, 'hadoop-env', 'hdfs_principal_name')
+hdfs_site = module_configs.get_property_value(module_name, 'hdfs-site', '')
 smoke_user =  get_cluster_setting_value('smokeuser')
 smoke_hdfs_user_dir = format("/user/{smoke_user}")
 smoke_hdfs_user_mode = 0770
@@ -300,10 +290,10 @@ namenode_id = None
 namenode_rpc = None
 
 dfs_ha_enabled = False
-dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.internal.nameservices', None)
+dfs_ha_nameservices = module_configs.get_property_value(module_name, 'hdfs-site', 'dfs.internal.nameservices')
 if dfs_ha_nameservices is None:
-  dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.nameservices', None)
-dfs_ha_namenode_ids = default(format("/configurations/hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
+  dfs_ha_nameservices = module_configs.get_property_value(module_name, 'hdfs-site', 'dfs.nameservices')
+dfs_ha_namenode_ids = module_configs.get_property_value(module_name, 'hdfs-site', 'dfs.ha.namenodes.{dfs_ha_nameservices}')
 
 dfs_ha_namemodes_ids_list = []
 other_namenode_id = None
@@ -316,17 +306,17 @@ if dfs_ha_namenode_ids:
 
 if dfs_ha_enabled:
  for nn_id in dfs_ha_namemodes_ids_list:
-   nn_host = config['configurations']['hdfs-site'][format('dfs.namenode.rpc-address.{dfs_ha_nameservices}.{nn_id}')]
+   nn_host = module_configs.get_property_value(module_name, 'hdfs-site', format('dfs.namenode.rpc-address.{dfs_ha_nameservices}.{nn_id}'))
    if hostname.lower() in nn_host.lower():
      namenode_id = nn_id
      namenode_rpc = nn_host
    pass
  pass
 else:
-  namenode_rpc = default('/configurations/hdfs-site/dfs.namenode.rpc-address', default_fs)
+  namenode_rpc = module_configs.get_property_value(module_name, 'hdfs-site', 'dfs.namenode.rpc-address', default_fs)
 
 # if HDFS is not installed in the cluster, then don't try to access namenode_rpc
-if has_namenode and namenode_rpc and "core-site" in config['configurations']:
+if has_namenode and namenode_rpc and 'core-site' in module_configs:
   port_str = namenode_rpc.split(':')[-1].strip()
   try:
     nn_rpc_client_port = int(port_str)
@@ -334,11 +324,11 @@ if has_namenode and namenode_rpc and "core-site" in config['configurations']:
     nn_rpc_client_port = None
 
 if dfs_ha_enabled:
- dfs_service_rpc_address = default(format('/configurations/hdfs-site/dfs.namenode.servicerpc-address.{dfs_ha_nameservices}.{namenode_id}'), None)
- dfs_lifeline_rpc_address = default(format('/configurations/hdfs-site/dfs.namenode.lifeline.rpc-address.{dfs_ha_nameservices}.{namenode_id}'), None)
+ dfs_service_rpc_address = module_configs.get_property_value(module_name, 'hdfs-site', 'dfs.namenode.servicerpc-address.{dfs_ha_nameservices}.{namenode_id}')
+ dfs_lifeline_rpc_address = module_configs.get_property_value(module_name, 'hdfs-site', 'dfs.namenode.lifeline.rpc-address.{dfs_ha_nameservices}.{namenode_id}')
 else:
- dfs_service_rpc_address = default('/configurations/hdfs-site/dfs.namenode.servicerpc-address', None)
- dfs_lifeline_rpc_address = default(format('/configurations/hdfs-site/dfs.namenode.lifeline.rpc-address'), None)
+ dfs_service_rpc_address = module_configs.get_property_value(module_name, 'hdfs-site', 'dfs.namenode.servicerpc-address')
+ dfs_lifeline_rpc_address = module_configs.get_property_value(module_name, 'hdfs-site', 'dfs.namenode.lifeline.rpc-address')
 
 if dfs_service_rpc_address:
  nn_rpc_dn_port = dfs_service_rpc_address.split(':')[1].strip()
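
For anyone tracking the migration pattern above: each hunk swaps a raw
command.json lookup (config['configurations'][type][prop], or the
default("/configurations/...", fallback) helper) for
module_configs.get_property_value(module_name, config_type, property_name,
default). A minimal stubbed sketch of the lookup semantics implied by these
call sites -- the real ModuleConfigs lives in the execution_command library,
so treat this as an illustration rather than its actual implementation:

    class ModuleConfigs(object):
      """Stub mirroring how params.py above consumes module_configs."""
      def __init__(self, configs):
        self._configs = configs

      def get_property_value(self, module_name, config_type, property_name, default=None):
        # A missing config type or property yields the supplied default
        # instead of raising KeyError, which is why the patch can drop the
        # old "'x' in config['configurations']" guards.
        return self._configs.get(config_type, {}).get(property_name, default)

    module_configs = ModuleConfigs({'hadoop-env': {'hadoop_heapsize': '1024'}})
    module_name = 'HDFS'
    module_configs.get_property_value(module_name, 'hadoop-env', 'hadoop_heapsize')
    # -> '1024'
    module_configs.get_property_value(module_name, 'mapred-env', 'mapred_pid_dir_prefix', '/var/run/hadoop-mapreduce')
    # -> '/var/run/hadoop-mapreduce' (fallback, config type absent)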
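One caveat on the execution_command._execution_command.__get_value(...) call
sites: assuming __get_value is defined with its leading double underscore
inside the ExecutionCommand class, Python name-mangles it to
_ExecutionCommand__get_value, and module-level code such as params.py cannot
reach it under the unmangled name. A short reproduction of that mangling
behaviour (stub class, not the Ambari one):

    class ExecutionCommand(object):
      def __get_value(self, key, default_value=None):
        return default_value

    ec = ExecutionCommand()
    try:
      ec.__get_value("clusterHostInfo/all_hosts", [])
    except AttributeError:
      # Raised: the attribute is stored as _ExecutionCommand__get_value.
      pass
    ec._ExecutionCommand__get_value("clusterHostInfo/all_hosts", [])  # resolves, but is private API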

-- 
To stop receiving notification emails like this one, please contact
jlun...@apache.org.
