This is an automated email from the ASF dual-hosted git repository.

jluniya pushed a commit to branch branch-feature-AMBARI-14714
in repository https://gitbox.apache.org/repos/asf/ambari.git

The following commit(s) were added to refs/heads/branch-feature-AMBARI-14714 by this push:
     new 66318a6  [AMBARI-23408] ExecutionCommand object has no attribute _execution_command (#892)
66318a6 is described below

commit 66318a67a01d6177f73ef01774066db9cfdaa459
Author: sduan <sd...@hortonworks.com>
AuthorDate: Thu Apr 5 23:50:58 2018 -0700

    [AMBARI-23408] ExecutionCommand object has no attribute _execution_command (#892)

    * AMBARI-23408: Update all references to command.json in stack-hooks to use new execution_command library

    * AMBARI-23408: Update all references to command.json in stack-hooks to use new execution_command library

    * AMBARI-23408: Continued: using new execution_command api to avoid accessing private field directly

    * AMBARI-23408: Continued: using new execution_command api to avoid accessing private field directly
---
 .../stack-hooks/after-INSTALL/scripts/params.py    |  2 +-
 .../after-INSTALL/scripts/shared_initialization.py |  4 +-
 .../stack-hooks/before-ANY/scripts/params.py       | 16 ++++----
 .../before-ANY/scripts/shared_initialization.py    |  2 +-
 .../stack-hooks/before-INSTALL/scripts/params.py   | 34 +++++++--------
 .../stack-hooks/before-START/scripts/params.py     | 48 +++++++++++-----------
 6 files changed, 53 insertions(+), 53 deletions(-)

diff --git a/ambari-server/src/main/resources/stack-hooks/after-INSTALL/scripts/params.py b/ambari-server/src/main/resources/stack-hooks/after-INSTALL/scripts/params.py
index bc81dd6..9434c13 100644
--- a/ambari-server/src/main/resources/stack-hooks/after-INSTALL/scripts/params.py
+++ b/ambari-server/src/main/resources/stack-hooks/after-INSTALL/scripts/params.py
@@ -100,7 +100,7 @@ mapred_log_dir_prefix = module_configs.get_property_value(module_name, 'mapred-e
 hdfs_user = module_configs.get_property_value(module_name, 'hadoop-env', 'hdfs_user')
 user_group = get_cluster_setting_value('user_group')
 
-namenode_host = execution_command._execution_command.__get_value("clusterHostInfo/namenode_hosts", [])
+namenode_host = execution_command.get_component_hosts('namenode')
 has_namenode = not len(namenode_host) == 0
 
 if has_namenode or dfs_type == 'HCFS':
diff --git a/ambari-server/src/main/resources/stack-hooks/after-INSTALL/scripts/shared_initialization.py b/ambari-server/src/main/resources/stack-hooks/after-INSTALL/scripts/shared_initialization.py
index 3febc85..9a596fe 100644
--- a/ambari-server/src/main/resources/stack-hooks/after-INSTALL/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stack-hooks/after-INSTALL/scripts/shared_initialization.py
@@ -83,8 +83,8 @@ def setup_config():
     # create core-site only if the hadoop config diretory exists
     XmlConfig("core-site.xml",
               conf_dir=params.hadoop_conf_dir,
-              configurations=params.module_configs.get_property_value(params.module_name, 'core-site', ''),
-              configuration_attributes=params.execution_command._execution_command.__get_value("configurationAttributes/core-site"),
+              configurations=params.module_configs.get_all_properties(params.module_name, 'core-site'),
+              configuration_attributes=params.execution_command.get_all_attributes(params.module_name, 'core-site'),
               owner=params.hdfs_user,
               group=params.user_group,
               only_if=format("ls {hadoop_conf_dir}"))
diff --git a/ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/params.py b/ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/params.py
index 50168e1..2f88583 100644
--- a/ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/params.py
+++ b/ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/params.py
@@ -187,13 +187,13 @@ zeppelin_group = module_configs.get_property_value(module_name, 'zeppelin-env',
 user_group = get_cluster_setting_value('user_group')
 
-ganglia_server_hosts = execution_command._execution_command.__get_value("clusterHostInfo/ganglia_server_hosts", [])
-namenode_host = execution_command._execution_command.__get_value("clusterHostInfo/namenode_hosts", [])
-hbase_master_hosts = execution_command._execution_command.__get_value("clusterHostInfo/hbase_master_hosts", [])
-oozie_servers = execution_command._execution_command.__get_value("clusterHostInfo/oozie_server", [])
-falcon_server_hosts = execution_command._execution_command.__get_value("clusterHostInfo/falcon_server_hosts", [])
-ranger_admin_hosts = execution_command._execution_command.__get_value("clusterHostInfo/ranger_admin_hosts", [])
-zeppelin_master_hosts = execution_command._execution_command.__get_value("clusterHostInfo/zeppelin_master_hosts", [])
+ganglia_server_hosts = execution_command.get_component_hosts('ganglia_server')
+namenode_host = execution_command.get_component_hosts('namenode')
+hbase_master_hosts = execution_command.get_component_hosts('hbase_master')
+oozie_servers = execution_command.get_component_hosts('oozie_server')
+falcon_server_hosts = execution_command.get_component_hosts('falcon_server')
+ranger_admin_hosts = execution_command.get_component_hosts('ranger_admin')
+zeppelin_master_hosts = execution_command.get_component_hosts('zeppelin_master')
 
 # get the correct version to use for checking stack features
 version_for_stack_feature_checks = get_stack_feature_version(config)
@@ -201,7 +201,7 @@ version_for_stack_feature_checks = get_stack_feature_version(config)
 
 has_namenode = not len(namenode_host) == 0
 has_ganglia_server = not len(ganglia_server_hosts) == 0
-has_tez = module_configs.get_property_value(module_name, 'tez-site', '') is not None
+has_tez = bool(module_configs.get_all_properties(module_name, 'tez-site'))
 has_hbase_masters = not len(hbase_master_hosts) == 0
 has_oozie_server = not len(oozie_servers) == 0
 has_falcon_server_hosts = not len(falcon_server_hosts) == 0
diff --git a/ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/shared_initialization.py b/ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/shared_initialization.py
index 3690821..373632c 100644
--- a/ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/shared_initialization.py
@@ -156,7 +156,7 @@ def get_uid(user, return_existing=False):
   """
   import params
   user_str = str(user) + "_uid"
-  service_env = [ serviceEnv for serviceEnv in params.module_configs if user_str in params.module_configs.get_property_value(params.module_name, serviceEnv, "")]
+  service_env = [ serviceEnv for serviceEnv in params.module_configs if params.module_configs.get_property_value(params.module_name, serviceEnv, user_str)]
 
   if service_env and params.module_configs.get_property_value(params.module_name, service_env[0], user_str):
     service_env_str = str(service_env[0])
diff --git a/ambari-server/src/main/resources/stack-hooks/before-INSTALL/scripts/params.py b/ambari-server/src/main/resources/stack-hooks/before-INSTALL/scripts/params.py
index 3004f1f..abb00df 100644
--- a/ambari-server/src/main/resources/stack-hooks/before-INSTALL/scripts/params.py
+++ b/ambari-server/src/main/resources/stack-hooks/before-INSTALL/scripts/params.py
@@ -58,21 +58,21 @@ repo_ubuntu = get_cluster_setting_value('repo_ubuntu_template')
 #hosts
 hostname = execution_command.get_host_name()
 ambari_server_hostname = execution_command.get_ambari_server_host()
-rm_host = execution_command._execution_command.__get_value("clusterHostInfo/resourcemanager_hosts", [])
-slave_hosts = execution_command._execution_command.__get_value("clusterHostInfo/datanode_hosts", [])
-oozie_servers = execution_command._execution_command.__get_value("clusterHostInfo/oozie_server", [])
-hcat_server_hosts = execution_command._execution_command.__get_value("clusterHostInfo/webhcat_server_hosts", [])
-hive_server_host = execution_command._execution_command.__get_value("clusterHostInfo/hive_server_hosts", [])
-hbase_master_hosts = execution_command._execution_command.__get_value("clusterHostInfo/hbase_master_hosts", [])
-hs_host = execution_command._execution_command.__get_value("clusterHostInfo/historyserver_hosts", [])
-jtnode_host = execution_command._execution_command.__get_value("clusterHostInfo/jtnode_hosts", [])
-namenode_host = execution_command._execution_command.__get_value("clusterHostInfo/namenode_hosts", [])
-zk_hosts = execution_command._execution_command.__get_value("clusterHostInfo/zookeeper_server_hosts", [])
-ganglia_server_hosts = execution_command._execution_command.__get_value("clusterHostInfo/ganglia_server_hosts", [])
-storm_server_hosts = execution_command._execution_command.__get_value("clusterHostInfo/nimbus_hosts", [])
-falcon_host = execution_command._execution_command.__get_value("clusterHostInfo/falcon_server_hosts", [])
-
-has_sqoop_client = 'sqoop-env' in module_configs
+rm_host = execution_command.get_component_hosts('resourcemanager')
+slave_hosts = execution_command.get_component_hosts('datanode')
+oozie_servers = execution_command.get_component_hosts('oozie_server')
+hcat_server_hosts = execution_command.get_component_hosts('webhcat_server')
+hive_server_host = execution_command.get_component_hosts('hive_server')
+hbase_master_hosts = execution_command.get_component_hosts('hbase_master')
+hs_host = execution_command.get_component_hosts('historyserver')
+jtnode_host = execution_command.get_component_hosts('jtnode')
+namenode_host = execution_command.get_component_hosts('namenode')
+zk_hosts = execution_command.get_component_hosts('zookeeper_server')
+ganglia_server_hosts = execution_command.get_component_hosts('ganglia_server')
+storm_server_hosts = execution_command.get_component_hosts('nimbus')
+falcon_host = execution_command.get_component_hosts('falcon_server')
+
+has_sqoop_client = bool(module_configs.get_all_properties(module_name, 'sqoop-env'))
 has_namenode = not len(namenode_host) == 0
 has_hs = not len(hs_host) == 0
 has_resourcemanager = not len(rm_host) == 0
@@ -85,7 +85,7 @@ has_zk_host = not len(zk_hosts) == 0
 has_ganglia_server = not len(ganglia_server_hosts) == 0
 has_storm_server = not len(storm_server_hosts) == 0
 has_falcon_server = not len(falcon_host) == 0
-has_tez = module_configs.get_property_value(module_name, 'tez-site', '') is not None
+has_tez = bool(module_configs.get_all_properties(module_name, 'tez-site'))
 
 is_namenode_master = hostname in namenode_host
 is_jtnode_master = hostname in jtnode_host
@@ -118,4 +118,4 @@ if has_hbase_masters:
 
 repo_info = execution_command.get_repo_info()
 service_repo_info = execution_command.get_service_repo_info()
-repo_file = execution_command._execution_command.__get_value("repositoryFile")
+repo_file = execution_command.get_repository_file()
diff --git a/ambari-server/src/main/resources/stack-hooks/before-START/scripts/params.py b/ambari-server/src/main/resources/stack-hooks/before-START/scripts/params.py
index 40e95b6..68533b5 100644
--- a/ambari-server/src/main/resources/stack-hooks/before-START/scripts/params.py
+++ b/ambari-server/src/main/resources/stack-hooks/before-START/scripts/params.py
@@ -65,7 +65,7 @@ major_stack_version = get_major_version(stack_version_formatted)
 dfs_type = execution_command.get_dfs_type()
 hadoop_conf_dir = "/etc/hadoop/conf"
 
-component_list = execution_command._execution_command.__get_value("localComponents", [])
+component_list = execution_command.get_local_components()
 
 hdfs_tmp_dir = module_configs.get_property_value(module_name, 'hadoop-env', 'hdfs_tmp_dir', '/tmp')
@@ -96,7 +96,7 @@ java_home = execution_command.get_java_home()
 java_exec = "{0}/bin/java".format(java_home) if java_home is not None else "/bin/java"
 
 #users and groups
-has_hadoop_env = 'hadoop-env' in module_configs
+has_hadoop_env = bool(module_configs.get_all_properties(module_name, "hadoop-env"))
 mapred_user = module_configs.get_property_value(module_name, 'mapred-env', 'mapred_user')
 hdfs_user = module_configs.get_property_value(module_name, 'hadoop-env', 'hdfs_user')
 yarn_user = module_configs.get_property_value(module_name, 'yarn-env', 'yarn_user')
@@ -106,24 +106,24 @@ user_group = get_cluster_setting_value('user_group')
 #hosts
 hostname = execution_command.get_host_name()
 ambari_server_hostname = execution_command.get_ambari_server_host()
-rm_host = execution_command._execution_command.__get_value("clusterHostInfo/resourcemanager_hosts", [])
-slave_hosts = execution_command._execution_command.__get_value("clusterHostInfo/datanode_hosts", [])
-oozie_servers = execution_command._execution_command.__get_value("clusterHostInfo/oozie_server", [])
-hcat_server_hosts = execution_command._execution_command.__get_value("clusterHostInfo/webhcat_server_hosts", [])
-hive_server_host = execution_command._execution_command.__get_value("clusterHostInfo/hive_server_hosts", [])
-hbase_master_hosts = execution_command._execution_command.__get_value("clusterHostInfo/hbase_master_hosts", [])
-hs_host = execution_command._execution_command.__get_value("clusterHostInfo/historyserver_hosts", [])
-jtnode_host = execution_command._execution_command.__get_value("clusterHostInfo/jtnode_hosts", [])
-namenode_host = execution_command._execution_command.__get_value("clusterHostInfo/namenode_hosts", [])
-zk_hosts = execution_command._execution_command.__get_value("clusterHostInfo/zookeeper_server_hosts", [])
-ganglia_server_hosts = execution_command._execution_command.__get_value("clusterHostInfo/ganglia_server_hosts", [])
+rm_host = execution_command.get_component_hosts('resourcemanager')
+slave_hosts = execution_command.get_component_hosts('datanode')
+oozie_servers = execution_command.get_component_hosts('oozie_server')
+hcat_server_hosts = execution_command.get_component_hosts('webhcat_server')
+hive_server_host = execution_command.get_component_hosts('hive_server')
+hbase_master_hosts = execution_command.get_component_hosts('hbase_master')
+hs_host = execution_command.get_component_hosts('historyserver')
+jtnode_host = execution_command.get_component_hosts('jtnode')
+namenode_host = execution_command.get_component_hosts('namenode')
+zk_hosts = execution_command.get_component_hosts('zookeeper_server')
+ganglia_server_hosts = execution_command.get_component_hosts('ganglia_server')
 
 cluster_name = execution_command.get_cluster_name()
 set_instanceId = "false"
 ams_collector_hosts = module_configs.get_property_value(module_name, 'cluster-env', 'metrics_collector_external_hosts')
 if ams_collector_hosts:
   set_instanceId = "true"
 else:
-  ams_collector_hosts = ",".join(execution_command._execution_command.__get_value("clusterHostInfo/metrics_collector_hosts", []))
+  ams_collector_hosts = ",".join(execution_command.get_component_hosts('metrics_collector'))
 
 has_namenode = not len(namenode_host) == 0
 has_resourcemanager = not len(rm_host) == 0
@@ -170,11 +170,11 @@ host_in_memory_aggregation = module_configs.get_property_value(module_name, 'ams
 host_in_memory_aggregation_port = module_configs.get_property_value(module_name, 'ams-site', 'timeline.metrics.host.inmemory.aggregation.port', 61888)
 
 # Cluster Zookeeper quorum
-zookeeper_quorum = module_configs.get_property_value(module_name, 'zoo.cfg', 'clientPort')
+zookeeper_quorum = None
 if has_zk_host:
   if not zookeeper_quorum:
     zookeeper_clientPort = '2181'
-    zookeeper_quorum = (':' + zookeeper_clientPort + ',').join(execution_command._execution_command.__get_value("clusterHostInfo/zookeeper_server_hosts"))
+    zookeeper_quorum = (':' + zookeeper_clientPort + ',').join(execution_command.get_component_hosts('zookeeper_server'))
     # last port config
     zookeeper_quorum += ':' + zookeeper_clientPort
@@ -254,10 +254,10 @@ is_webhdfs_enabled = module_configs.get_property_value(module_name, 'hdfs-site',
 default_fs = module_configs.get_property_value(module_name, 'core-site', 'fs.defaultFS')
 
 #host info
-all_hosts = execution_command._execution_command.__get_value("clusterHostInfo/all_hosts", [])
-all_racks = execution_command._execution_command.__get_value("clusterHostInfo/all_racks", [])
-all_ipv4_ips = execution_command._execution_command.__get_value("clusterHostInfo/all_ipv4_ips", [])
-slave_hosts = execution_command._execution_command.__get_value("clusterHostInfo/datanode_hosts", [])
+all_hosts = execution_command.get_all_hosts()
+all_racks = execution_command.get_all_racks()
+all_ipv4_ips = execution_command.get_all_ipv4_ips()
+slave_hosts = execution_command.get_component_hosts('datanode')
 
 #topology files
 net_topology_script_file_path = "/etc/hadoop/conf/topology_script.py"
@@ -266,15 +266,15 @@ net_topology_mapping_data_file_name = 'topology_mappings.data'
 net_topology_mapping_data_file_path = os.path.join(net_topology_script_dir, net_topology_mapping_data_file_name)
 
 #Added logic to create /tmp and /user directory for HCFS stack.
-has_core_site = 'core-site' in module_configs
+has_core_site = bool(module_configs.get_all_properties(module_name, "core-site"))
 hdfs_user_keytab = module_configs.get_property_value(module_name, 'hadoop-env', 'hdfs_user_keytab')
 kinit_path_local = get_kinit_path()
 stack_version_unformatted = execution_command.get_mpack_version()
 stack_version_formatted = format_stack_version(stack_version_unformatted)
 hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
 hdfs_principal_name = module_configs.get_property_value(module_name, 'hadoop-env', 'hdfs_principal_name')
-hdfs_site = module_configs.get_property_value(module_name, 'hdfs-site', '')
-smoke_user = get_cluster_setting_value('smokeuser')
+hdfs_site = module_configs.get_all_properties(module_name, 'hdfs-site')
+smoke_user = get_cluster_setting_value('smokeuser')
 smoke_hdfs_user_dir = format("/user/{smoke_user}")
 smoke_hdfs_user_mode = 0770
@@ -316,7 +316,7 @@ else:
   namenode_rpc = module_configs.get_property_value(module_name, 'hdfs-site', 'dfs.namenode.rpc-address', default_fs)
 
 # if HDFS is not installed in the cluster, then don't try to access namenode_rpc
-if has_namenode and namenode_rpc and 'core-site' in module_configs:
+if has_namenode and namenode_rpc and module_configs.get_all_properties(module_name, 'core-site'):
   port_str = namenode_rpc.split(':')[-1].strip()
   try:
     nn_rpc_client_port = int(port_str)
-- 
To stop receiving notification emails like this one, please contact
jlun...@apache.org.
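
For readers following the migration, below is a minimal sketch of the access pattern this patch applies across the stack-hook params files. It uses only the accessor names that appear in the diff above (get_component_hosts, get_all_properties, get_property_value); the wrapper function name is hypothetical, and how params.py obtains the execution_command and module_configs objects is assumed rather than shown here.

    # Illustrative sketch only; helper name is hypothetical and the objects are
    # assumed to be provided the same way the patched params.py modules receive them.
    def derive_topology_params(execution_command, module_configs, module_name):
      # Old: execution_command._execution_command.__get_value("clusterHostInfo/namenode_hosts", [])
      # New: public accessor keyed by component name, returning a list of hosts
      namenode_host = execution_command.get_component_hosts('namenode')
      has_namenode = len(namenode_host) > 0

      # Presence checks on a whole config type now go through get_all_properties()
      # instead of "'core-site' in module_configs" or a get_property_value(..., '') check
      has_core_site = bool(module_configs.get_all_properties(module_name, 'core-site'))

      # Individual properties still use get_property_value(), optionally with a default
      hdfs_user = module_configs.get_property_value(module_name, 'hadoop-env', 'hdfs_user')

      return namenode_host, has_namenode, has_core_site, hdfs_user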