This is an automated email from the ASF dual-hosted git repository.

mradhakrishnan pushed a commit to branch branch-feature-AMBARI-14714
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/branch-feature-AMBARI-14714 by this push:
     new 8383d16  Branch feature ambari 14714 (#307)
8383d16 is described below

commit 8383d160f2c6c7d6477de27b175485bdf843fc32
Author: mradha25 <mradhakrish...@hortonworks.com>
AuthorDate: Fri Feb 9 12:55:32 2018 -0800

    Branch feature ambari 14714 (#307)
    
    * [AMBARI-22948] stack-hooks has to be updated to use cluster-settings instead of cluster-env
    
    * [AMBARI-22938] Fix SQL issue caused by the trunk merge
---
 .../src/main/resources/Ambari-DDL-Postgres-CREATE.sql     |  2 +-
 .../resources/stack-hooks/after-INSTALL/scripts/params.py |  5 +++--
 .../resources/stack-hooks/before-ANY/scripts/params.py    | 15 ++++++++-------
 .../stack-hooks/before-INSTALL/scripts/params.py          | 13 +++++++------
 .../resources/stack-hooks/before-START/scripts/params.py  | 11 ++++++-----
 5 files changed, 25 insertions(+), 21 deletions(-)

diff --git a/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql
index 20b777d..bbd0239 100644
--- a/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql
+++ b/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql
@@ -1287,7 +1287,7 @@ INSERT INTO ambari_sequences (sequence_name, sequence_value) VALUES
   ('servicecomponent_version_id_seq', 0),
   ('blueprint_service_id_seq', 0),
   ('blueprint_mpack_instance_id_seq', 0),
-  ('hostgroup_component_id_seq', 0);
+  ('hostgroup_component_id_seq', 0),
   ('repo_os_id_seq', 0),
   ('repo_definition_id_seq', 0),
   ('hostcomponentdesiredstate_id_seq', 0);
diff --git a/ambari-server/src/main/resources/stack-hooks/after-INSTALL/scripts/params.py b/ambari-server/src/main/resources/stack-hooks/after-INSTALL/scripts/params.py
index 4eecfa2..8d7b31d 100644
--- a/ambari-server/src/main/resources/stack-hooks/after-INSTALL/scripts/params.py
+++ b/ambari-server/src/main/resources/stack-hooks/after-INSTALL/scripts/params.py
@@ -27,6 +27,7 @@ from resource_management.libraries.functions import conf_select
 from resource_management.libraries.functions import stack_select
 from resource_management.libraries.functions import format_jvm_option
 from resource_management.libraries.functions.version import format_stack_version, get_major_version
+from resource_management.libraries.functions.cluster_settings import get_cluster_setting_value
 from string import lower
 
 config = Script.get_config()
@@ -64,7 +65,7 @@ mapreduce_libs_path = "/usr/hdp/current/hadoop-mapreduce-client/*"
 versioned_stack_root = '/usr/hdp/current'
 
 #security params
-security_enabled = config['configurations']['cluster-env']['security_enabled']
+security_enabled = get_cluster_setting_value('security_enabled')
 
 #java params
 java_home = config['hostLevelParams']['java_home']
@@ -94,7 +95,7 @@ mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefi
 
 #users and groups
 hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-user_group = config['configurations']['cluster-env']['user_group']
+user_group = get_cluster_setting_value('user_group')
 
 namenode_host = default("/clusterHostInfo/namenode_host", [])
 has_namenode = not len(namenode_host) == 0
diff --git a/ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/params.py b/ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/params.py
index e050db5..09ed2e1 100644
--- a/ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/params.py
+++ b/ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/params.py
@@ -37,6 +37,7 @@ from resource_management.libraries.functions import StackFeature
 from resource_management.libraries.functions.stack_features import check_stack_feature
 from resource_management.libraries.functions.stack_features import get_stack_feature_version
 from resource_management.libraries.functions.get_architecture import get_architecture
+from resource_management.libraries.functions.cluster_settings import get_cluster_setting_value
 from ambari_commons.constants import AMBARI_SUDO_BINARY
 
 
@@ -72,7 +73,7 @@ if (upgrade_type is not None) and version:
 ambari_java_home = default("/commandParams/ambari_java_home", None)
 ambari_jdk_name = default("/commandParams/ambari_jdk_name", None)
 
-security_enabled = config['configurations']['cluster-env']['security_enabled']
+security_enabled = get_cluster_setting_value('security_enabled')
 hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
 
 # Some datanode settings
@@ -163,7 +164,7 @@ hadoop_env_sh_template = config['configurations']['hadoop-env']['content']
 
 #users and groups
 hbase_user = config['configurations']['hbase-env']['hbase_user']
-smoke_user =  config['configurations']['cluster-env']['smokeuser']
+smoke_user =  get_cluster_setting_value('smokeuser')
 gmetad_user = config['configurations']['ganglia-env']["gmetad_user"]
 gmond_user = config['configurations']['ganglia-env']["gmond_user"]
 tez_user = config['configurations']['tez-env']["tez_user"]
@@ -173,7 +174,7 @@ ranger_user = config['configurations']['ranger-env']["ranger_user"]
 zeppelin_user = config['configurations']['zeppelin-env']["zeppelin_user"]
 zeppelin_group = config['configurations']['zeppelin-env']["zeppelin_group"]
 
-user_group = config['configurations']['cluster-env']['user_group']
+user_group = get_cluster_setting_value('user_group')
 
 ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
 namenode_host = default("/clusterHostInfo/namenode_host", [])
@@ -219,9 +220,9 @@ proxyuser_group = default("/configurations/hadoop-env/proxyuser_group","users")
 ranger_group = config['configurations']['ranger-env']['ranger_group']
 dfs_cluster_administrators_group = config['configurations']['hdfs-site']["dfs.cluster.administrators"]
 
-sysprep_skip_create_users_and_groups = default("/configurations/cluster-env/sysprep_skip_create_users_and_groups", False)
-ignore_groupsusers_create = default("/configurations/cluster-env/ignore_groupsusers_create", False)
-fetch_nonlocal_groups = config['configurations']['cluster-env']["fetch_nonlocal_groups"]
+sysprep_skip_create_users_and_groups = get_cluster_setting_value('sysprep_skip_create_users_and_groups')
+ignore_groupsusers_create = get_cluster_setting_value('ignore_groupsusers_create')
+fetch_nonlocal_groups = get_cluster_setting_value('fetch_nonlocal_groups')
 
 smoke_user_dirs = format("/tmp/hadoop-{smoke_user},/tmp/hsperfdata_{smoke_user},/home/{smoke_user},/tmp/{smoke_user},/tmp/sqoop-{smoke_user}")
 if has_hbase_masters:
@@ -247,7 +248,7 @@ group_list = set(json.loads(config['hostLevelParams']['group_list']) + [user_gro
 host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
 
 tez_am_view_acls = config['configurations']['tez-site']["tez.am.view-acls"]
-override_uid = str(default("/configurations/cluster-env/override_uid", "true")).lower()
+override_uid = get_cluster_setting_value('override_uid')
 
 # if NN HA on secure cluster, access ZooKeeper securely
 if stack_supports_zk_security and dfs_ha_enabled and security_enabled:
diff --git a/ambari-server/src/main/resources/stack-hooks/before-INSTALL/scripts/params.py b/ambari-server/src/main/resources/stack-hooks/before-INSTALL/scripts/params.py
index 50c5a40..b4f48e3 100644
--- a/ambari-server/src/main/resources/stack-hooks/before-INSTALL/scripts/params.py
+++ b/ambari-server/src/main/resources/stack-hooks/before-INSTALL/scripts/params.py
@@ -23,6 +23,7 @@ from resource_management.core.system import System
 from resource_management.libraries.script.script import Script
 from resource_management.libraries.functions import default, format
 from resource_management.libraries.functions.expect import expect
+from resource_management.libraries.functions.cluster_settings import get_cluster_setting_value
 
 config = Script.get_config()
 tmp_dir = Script.get_tmp_dir()
@@ -35,19 +36,19 @@ stack_version_formatted = format_stack_version(stack_version_unformatted)
 
 #users and groups
 hbase_user = config['configurations']['hbase-env']['hbase_user']
-smoke_user =  config['configurations']['cluster-env']['smokeuser']
+smoke_user = get_cluster_setting_value('smokeuser')
 gmetad_user = config['configurations']['ganglia-env']["gmetad_user"]
 gmond_user = config['configurations']['ganglia-env']["gmond_user"]
 tez_user = config['configurations']['tez-env']["tez_user"]
 
-user_group = config['configurations']['cluster-env']['user_group']
+user_group = get_cluster_setting_value('user_group')
 proxyuser_group = default("/configurations/hadoop-env/proxyuser_group","users")
 
 hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
 
 # repo templates
-repo_rhel_suse =  config['configurations']['cluster-env']['repo_suse_rhel_template']
-repo_ubuntu =  config['configurations']['cluster-env']['repo_ubuntu_template']
+repo_rhel_suse =  get_cluster_setting_value('repo_suse_rhel_template')
+repo_ubuntu =  get_cluster_setting_value('repo_ubuntu_template')
 
 #hosts
 hostname = config["hostname"]
@@ -93,7 +94,7 @@ if has_ganglia_server:
 hbase_tmp_dir = "/tmp/hbase-hbase"
 
 #security params
-security_enabled = config['configurations']['cluster-env']['security_enabled']
+security_enabled = get_cluster_setting_value('security_enabled')
 
 #java params
 java_home = config['hostLevelParams']['java_home']
@@ -102,7 +103,7 @@ jdk_name = default("/hostLevelParams/jdk_name", None) # None when jdk is already
 jce_policy_zip = default("/hostLevelParams/jce_name", None) # None when jdk is already installed by user
 jce_location = config['hostLevelParams']['jdk_location']
 jdk_location = config['hostLevelParams']['jdk_location']
-ignore_groupsusers_create = default("/configurations/cluster-env/ignore_groupsusers_create", False)
+ignore_groupsusers_create = get_cluster_setting_value('ignore_groupsusers_create')
 host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
 
 smoke_user_dirs = format("/tmp/hadoop-{smoke_user},/tmp/hsperfdata_{smoke_user},/home/{smoke_user},/tmp/{smoke_user},/tmp/sqoop-{smoke_user}")
diff --git a/ambari-server/src/main/resources/stack-hooks/before-START/scripts/params.py b/ambari-server/src/main/resources/stack-hooks/before-START/scripts/params.py
index 6c26e01..34f85b0 100644
--- a/ambari-server/src/main/resources/stack-hooks/before-START/scripts/params.py
+++ b/ambari-server/src/main/resources/stack-hooks/before-START/scripts/params.py
@@ -34,6 +34,7 @@ from resource_management.libraries.functions.stack_features import check_stack_f
 from resource_management.libraries.functions.stack_features import get_stack_feature_version
 from resource_management.libraries.functions import StackFeature
 from ambari_commons.constants import AMBARI_SUDO_BINARY
+from resource_management.libraries.functions.cluster_settings import get_cluster_setting_value
 
 config = Script.get_config()
 tmp_dir = Script.get_tmp_dir()
@@ -49,10 +50,10 @@ host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
 
 # Whether to skip copying fast-hdfs-resource.jar to /var/lib/ambari-agent/lib/
 # This is required if tarballs are going to be copied to HDFS, so set to False
-sysprep_skip_copy_fast_jar_hdfs = host_sys_prepped and default("/configurations/cluster-env/sysprep_skip_copy_fast_jar_hdfs", False)
+sysprep_skip_copy_fast_jar_hdfs = host_sys_prepped and get_cluster_setting_value('sysprep_skip_copy_fast_jar_hdfs')
 
 # Whether to skip setting up the unlimited key JCE policy
-sysprep_skip_setup_jce = host_sys_prepped and default("/configurations/cluster-env/sysprep_skip_setup_jce", False)
+sysprep_skip_setup_jce = host_sys_prepped and get_cluster_setting_value('sysprep_skip_setup_jce')
 
 stack_version_unformatted = config['hostLevelParams']['stack_version']
 stack_version_formatted = format_stack_version(stack_version_unformatted)
@@ -79,7 +80,7 @@ create_lib_snappy_symlinks = False
 current_service = config['serviceName']
 
 #security params
-security_enabled = config['configurations']['cluster-env']['security_enabled']
+security_enabled = get_cluster_setting_value('security_enabled')
 
 ambari_server_resources_url = default("/hostLevelParams/jdk_location", None)
 if ambari_server_resources_url is not None and ambari_server_resources_url.endswith('/'):
@@ -98,7 +99,7 @@ mapred_user = config['configurations']['mapred-env']['mapred_user']
 hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
 yarn_user = config['configurations']['yarn-env']['yarn_user']
 
-user_group = config['configurations']['cluster-env']['user_group']
+user_group = get_cluster_setting_value('user_group')
 
 #hosts
 hostname = config["hostname"]
@@ -292,7 +293,7 @@ stack_version_formatted = format_stack_version(stack_version_unformatted)
 hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
 hdfs_principal_name = default('/configurations/hadoop-env/hdfs_principal_name', None)
 hdfs_site = config['configurations']['hdfs-site']
-smoke_user =  config['configurations']['cluster-env']['smokeuser']
+smoke_user =  get_cluster_setting_value('smokeuser')
 smoke_hdfs_user_dir = format("/user/{smoke_user}")
 smoke_hdfs_user_mode = 0770
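
For context on the stack-hooks changes above: each direct lookup of
config['configurations']['cluster-env'][...] (or its
default("/configurations/cluster-env/...") equivalent) is replaced by the
get_cluster_setting_value helper imported from
resource_management.libraries.functions.cluster_settings. The helper's
implementation is not part of this patch; below is a minimal sketch of what
such a helper could look like, assuming the server delivers cluster-wide
settings in a 'clusterSettings' block of the command JSON (both the block
name and the defaulting behavior are assumptions, not the committed code):

    # Hypothetical sketch of get_cluster_setting_value -- not the committed
    # implementation; shown only to illustrate the call sites in this patch.
    from resource_management.libraries.script.script import Script

    def get_cluster_setting_value(setting_name):
        """Return a cluster-wide setting by name.

        Assumes cluster settings arrive in a 'clusterSettings' block of
        the command JSON instead of the old 'cluster-env' config type.
        """
        config = Script.get_config()
        settings = config.get('clusterSettings', {})
        return settings.get(setting_name)

Call sites then read, for example,
security_enabled = get_cluster_setting_value('security_enabled'), which keeps
the settings lookup in one place instead of repeating the 'cluster-env'
dictionary path at every use.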
 

-- 
To stop receiving notification emails like this one, please contact mradhakrish...@apache.org.