This is an automated email from the ASF dual-hosted git repository.

wuzhiguo pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git
The following commit(s) were added to refs/heads/trunk by this push:
     new 3baaa39b52  AMBARI-25752: Remove deprecated hooks in BIGTOP folder (#3386)
3baaa39b52 is described below

commit 3baaa39b52fed8f4e7fe0cdbdc2a0c6e2476bdb5
Author: timyuer <524860...@qq.com>
AuthorDate: Mon Oct 10 17:31:58 2022 +0800

    AMBARI-25752: Remove deprecated hooks in BIGTOP folder (#3386)
---
 .../3.2.0/hooks/after-INSTALL/scripts/hook.py      |  39 ---
 .../3.2.0/hooks/after-INSTALL/scripts/params.py    | 125 -------
 .../after-INSTALL/scripts/shared_initialization.py | 148 --------
 .../hooks/before-ANY/files/changeToSecureUid.sh    |  64 ----
 .../BIGTOP/3.2.0/hooks/before-ANY/scripts/hook.py  |  39 ---
 .../3.2.0/hooks/before-ANY/scripts/params.py       | 290 ----------------
 .../before-ANY/scripts/shared_initialization.py    | 273 ---------------
 .../3.2.0/hooks/before-INSTALL/scripts/hook.py     |  37 --
 .../3.2.0/hooks/before-INSTALL/scripts/params.py   | 114 ------
 .../before-INSTALL/scripts/repo_initialization.py  |  76 ----
 .../scripts/shared_initialization.py               |  37 --
 .../3.2.0/hooks/before-RESTART/scripts/hook.py     |  30 --
 .../3.2.0/hooks/before-SET_KEYTAB/scripts/hook.py  |  39 ---
 .../hooks/before-START/files/checkForFormat.sh     |  65 ----
 .../before-START/files/fast-hdfs-resource.jar      | Bin 19286899 -> 0 bytes
 .../hooks/before-START/files/task-log4j.properties | 134 --------
 .../hooks/before-START/files/topology_script.py    |  66 ----
 .../before-START/scripts/custom_extensions.py      | 173 ----------
 .../3.2.0/hooks/before-START/scripts/hook.py       |  43 ---
 .../3.2.0/hooks/before-START/scripts/params.py     | 382 ---------------------
 .../hooks/before-START/scripts/rack_awareness.py   |  48 ---
 .../before-START/scripts/shared_initialization.py  | 262 --------------
 .../templates/commons-logging.properties.j2        |  43 ---
 .../before-START/templates/exclude_hosts_list.j2   |  21 --
 .../templates/hadoop-metrics2.properties.j2        | 114 ------
 .../hooks/before-START/templates/health_check.j2   |  81 -----
 .../before-START/templates/include_hosts_list.j2   |  21 --
 .../templates/topology_mappings.data.j2            |  24 --
 28 files changed, 2788 deletions(-)

diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/after-INSTALL/scripts/hook.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/after-INSTALL/scripts/hook.py
deleted file mode 100644
index 39546b1f7a..0000000000
--- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/after-INSTALL/scripts/hook.py
+++ /dev/null
@@ -1,39 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
- -""" - -from resource_management.libraries.script.hook import Hook -from shared_initialization import link_configs -from shared_initialization import setup_config -from shared_initialization import setup_stack_symlinks - - -class AfterInstallHook(Hook): - - def hook(self, env): - import params - - env.set_params(params) - setup_stack_symlinks(self.stroutfile) - setup_config() - - link_configs(self.stroutfile) - - -if __name__ == "__main__": - AfterInstallHook().execute() diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/after-INSTALL/scripts/params.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/after-INSTALL/scripts/params.py deleted file mode 100644 index 910b02c8a6..0000000000 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/after-INSTALL/scripts/params.py +++ /dev/null @@ -1,125 +0,0 @@ -""" -Licensed to the Apache Software Foundation (ASF) under one -or more contributor license agreements. See the NOTICE file -distributed with this work for additional information -regarding copyright ownership. The ASF licenses this file -to you under the Apache License, Version 2.0 (the -"License"); you may not use this file except in compliance -with the License. You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -""" - -import os - -from ambari_commons.constants import AMBARI_SUDO_BINARY -from ambari_commons.constants import LOGFEEDER_CONF_DIR -from resource_management.libraries.script import Script -from resource_management.libraries.script.script import get_config_lock_file -from resource_management.libraries.functions import default -from resource_management.libraries.functions import conf_select -from resource_management.libraries.functions import stack_select -from resource_management.libraries.functions import format_jvm_option -from resource_management.libraries.functions.version import format_stack_version, get_major_version -from resource_management.libraries.functions.format import format -from string import lower - -config = Script.get_config() -tmp_dir = Script.get_tmp_dir() - -dfs_type = default("/clusterLevelParams/dfs_type", "") - -is_parallel_execution_enabled = int(default("/agentLevelParams/agentConfigParams/agent/parallel_execution", 0)) == 1 -host_sys_prepped = default("/ambariLevelParams/host_sys_prepped", False) - -sudo = AMBARI_SUDO_BINARY - -stack_version_unformatted = config['clusterLevelParams']['stack_version'] -stack_version_formatted = format_stack_version(stack_version_unformatted) -major_stack_version = get_major_version(stack_version_formatted) - -# service name -service_name = config['serviceName'] - -# logsearch configuration -logsearch_logfeeder_conf = LOGFEEDER_CONF_DIR - -agent_cache_dir = config['agentLevelParams']['agentCacheDir'] -service_package_folder = config['serviceLevelParams']['service_package_folder'] -logsearch_service_name = service_name.lower().replace("_", "-") -logsearch_config_file_name = 'input.config-' + logsearch_service_name + ".json" -logsearch_config_file_path = agent_cache_dir + "/" + service_package_folder + "/templates/" + logsearch_config_file_name + ".j2" -logsearch_config_file_exists = os.path.isfile(logsearch_config_file_path) - -# 
default hadoop params -hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec") - -mapreduce_libs_path = "/usr/hdp/current/hadoop-mapreduce-client/*" - -versioned_stack_root = '/usr/hdp/current' - -#security params -security_enabled = config['configurations']['cluster-env']['security_enabled'] - -#java params -java_home = config['ambariLevelParams']['java_home'] - -#hadoop params -hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix'] -hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix'] -hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger'] - -jsvc_path = "/usr/lib/bigtop-utils" - -hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize'] -namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize'] -namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize'] -namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize'] -namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m") -namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m") - -jtnode_opt_newsize = "200m" -jtnode_opt_maxnewsize = "200m" -jtnode_heapsize = "1024m" -ttnode_heapsize = "1024m" - -dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize'] -mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce") -mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce") - -#users and groups -hdfs_user = config['configurations']['hadoop-env']['hdfs_user'] -user_group = config['configurations']['cluster-env']['user_group'] - -namenode_hosts = default("/clusterHostInfo/namenode_hosts", []) -hdfs_client_hosts = default("/clusterHostInfo/hdfs_client_hosts", []) -has_hdfs_clients = len(hdfs_client_hosts) > 0 -has_namenode = len(namenode_hosts) > 0 -has_hdfs = has_hdfs_clients or has_namenode - -if has_hdfs or dfs_type == 'HCFS': - hadoop_conf_dir = conf_select.get_hadoop_conf_dir() - - mount_table_xml_inclusion_file_full_path = None - mount_table_content = None - if 'viewfs-mount-table' in config['configurations']: - xml_inclusion_file_name = 'viewfs-mount-table.xml' - mount_table = config['configurations']['viewfs-mount-table'] - - if 'content' in mount_table and mount_table['content'].strip(): - mount_table_xml_inclusion_file_full_path = os.path.join(hadoop_conf_dir, xml_inclusion_file_name) - mount_table_content = mount_table['content'] - -link_configs_lock_file = get_config_lock_file() -stack_select_lock_file = os.path.join(tmp_dir, "stack_select_lock_file") - -upgrade_suspended = default("/roleParams/upgrade_suspended", False) -sysprep_skip_conf_select = default("/configurations/cluster-env/sysprep_skip_conf_select", False) -conf_select_marker_file = format("{tmp_dir}/conf_select_done_marker") diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/after-INSTALL/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/after-INSTALL/scripts/shared_initialization.py deleted file mode 100644 index c9e84c320b..0000000000 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/after-INSTALL/scripts/shared_initialization.py +++ /dev/null @@ -1,148 +0,0 @@ -""" -Licensed to the Apache Software Foundation (ASF) under one -or more contributor license agreements. 
See the NOTICE file -distributed with this work for additional information -regarding copyright ownership. The ASF licenses this file -to you under the Apache License, Version 2.0 (the -"License"); you may not use this file except in compliance -with the License. You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -""" -import os - -import ambari_simplejson as json -from ambari_jinja2 import Environment as JinjaEnvironment -from resource_management.core.logger import Logger -from resource_management.core.resources.system import Directory, File -from resource_management.core.source import InlineTemplate, Template -from resource_management.libraries.functions import conf_select -from resource_management.libraries.functions import stack_select -from resource_management.libraries.functions.default import default -from resource_management.libraries.functions.format import format -from resource_management.libraries.functions.fcntl_based_process_lock import FcntlBasedProcessLock -from resource_management.libraries.resources.xml_config import XmlConfig -from resource_management.libraries.script import Script - - -def setup_stack_symlinks(struct_out_file): - """ - Invokes <stack-selector-tool> set all against a calculated fully-qualified, "normalized" version based on a - stack version, such as "2.3". This should always be called after a component has been - installed to ensure that all HDP pointers are correct. The stack upgrade logic does not - interact with this since it's done via a custom command and will not trigger this hook. - :return: - """ - import params - if params.upgrade_suspended: - Logger.warning("Skipping running stack-selector-tool because there is a suspended upgrade") - return - - if params.host_sys_prepped: - Logger.warning("Skipping running stack-selector-tool because this is a sys_prepped host. This may cause symlink pointers not to be created for HDP components installed later on top of an already sys_prepped host") - return - - # get the packages which the stack-select tool should be used on - #stack_packages = stack_select.get_packages(stack_select.PACKAGE_SCOPE_INSTALL) - #if stack_packages is None: - # return - - json_version = load_version(struct_out_file) - - if not json_version: - Logger.info("There is no advertised version for this component stored in {0}".format(struct_out_file)) - return - - # On parallel command execution this should be executed by a single process at a time. - with FcntlBasedProcessLock(params.stack_select_lock_file, enabled = params.is_parallel_execution_enabled, skip_fcntl_failures = True): - for package in stack_packages: - stack_select.select(package, json_version) - - -def setup_config(): - import params - stackversion = params.stack_version_unformatted - Logger.info("FS Type: {0}".format(params.dfs_type)) - - is_hadoop_conf_dir_present = False - if hasattr(params, "hadoop_conf_dir") and params.hadoop_conf_dir is not None and os.path.exists(params.hadoop_conf_dir): - is_hadoop_conf_dir_present = True - else: - Logger.warning("Parameter hadoop_conf_dir is missing or directory does not exist. 
This is expected if this host does not have any Hadoop components.") - - if is_hadoop_conf_dir_present and (params.has_hdfs or stackversion.find('Gluster') >= 0 or params.dfs_type == 'HCFS'): - # create core-site only if the hadoop config directory exists - XmlConfig("core-site.xml", - conf_dir=params.hadoop_conf_dir, - configurations=params.config['configurations']['core-site'], - configuration_attributes=params.config['configurationAttributes']['core-site'], - owner=params.hdfs_user, - group=params.user_group, - only_if=format("ls {hadoop_conf_dir}"), - xml_include_file=params.mount_table_xml_inclusion_file_full_path - ) - - if params.mount_table_content: - File(os.path.join(params.hadoop_conf_dir, params.xml_inclusion_file_name), - owner=params.hdfs_user, - group=params.user_group, - content=params.mount_table_content - ) - - Directory(params.logsearch_logfeeder_conf, - mode=0755, - cd_access='a', - create_parents=True - ) - - if params.logsearch_config_file_exists: - File(format("{logsearch_logfeeder_conf}/" + params.logsearch_config_file_name), - content=Template(params.logsearch_config_file_path,extra_imports=[default]) - ) - else: - Logger.warning('No logsearch configuration exists at ' + params.logsearch_config_file_path) - - -def load_version(struct_out_file): - """ - Load version from file. Made a separate method for testing - """ - try: - with open(struct_out_file, 'r') as fp: - json_info = json.load(fp) - - return json_info['version'] - except (IOError, KeyError, TypeError): - return None - - -def link_configs(struct_out_file): - """ - Use the conf_select module to link configuration directories correctly. - """ - import params - - json_version = load_version(struct_out_file) - - if not json_version: - Logger.info("Could not load 'version' from {0}".format(struct_out_file)) - return - - if not params.sysprep_skip_conf_select or not os.path.exists(params.conf_select_marker_file): - # On parallel command execution this should be executed by a single process at a time. - with FcntlBasedProcessLock(params.link_configs_lock_file, enabled = params.is_parallel_execution_enabled, skip_fcntl_failures = True): - for package_name, directories in conf_select.get_package_dirs().iteritems(): - conf_select.convert_conf_directories_to_symlinks(package_name, json_version, directories) - - # create a file to mark that conf-selects were already done - with open(params.conf_select_marker_file, "wb") as fp: - pass - else: - Logger.info(format("Skipping conf-select stage, since cluster-env/sysprep_skip_conf_select is set and mark file {conf_select_marker_file} exists")) diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-ANY/files/changeToSecureUid.sh b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-ANY/files/changeToSecureUid.sh deleted file mode 100644 index a6b8b77dae..0000000000 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-ANY/files/changeToSecureUid.sh +++ /dev/null @@ -1,64 +0,0 @@ -#!/usr/bin/env bash -# -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -username=$1 -directories=$2 -newUid=$3 - -function find_available_uid() { - for ((i=1001; i<=2000; i++)) - do - grep -q $i /etc/passwd - if [ "$?" -ne 0 ] - then - newUid=$i - break - fi - done -} - -if [ -z $2 ]; then - test $(id -u ${username} 2>/dev/null) - if [ $? -ne 1 ]; then - newUid=`id -u ${username}` - else - find_available_uid - fi - echo $newUid - exit 0 -else - find_available_uid -fi - -if [ $newUid -eq 0 ] -then - echo "Failed to find Uid between 1000 and 2000" - exit 1 -fi - -set -e -dir_array=($(echo $directories | sed 's/,/\n/g')) -old_uid=$(id -u $username) -sudo_prefix="/var/lib/ambari-agent/ambari-sudo.sh -H -E" -echo "Changing uid of $username from $old_uid to $newUid" -echo "Changing directory permisions for ${dir_array[@]}" -$sudo_prefix usermod -u $newUid $username && for dir in ${dir_array[@]} ; do ls $dir 2> /dev/null && echo "Changing permission for $dir" && $sudo_prefix chown -Rh $newUid $dir ; done -exit 0 diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-ANY/scripts/hook.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-ANY/scripts/hook.py deleted file mode 100644 index 25ca3a9c30..0000000000 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-ANY/scripts/hook.py +++ /dev/null @@ -1,39 +0,0 @@ -""" -Licensed to the Apache Software Foundation (ASF) under one -or more contributor license agreements. See the NOTICE file -distributed with this work for additional information -regarding copyright ownership. The ASF licenses this file -to you under the Apache License, Version 2.0 (the -"License"); you may not use this file except in compliance -with the License. You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -""" - - -from shared_initialization import setup_users, setup_hadoop_env, setup_java -from resource_management import Hook - - -class BeforeAnyHook(Hook): - - def hook(self, env): - import params - env.set_params(params) - - setup_users() - if params.has_hdfs or params.dfs_type == 'HCFS': - setup_hadoop_env() - setup_java() - - -if __name__ == "__main__": - BeforeAnyHook().execute() - diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-ANY/scripts/params.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-ANY/scripts/params.py deleted file mode 100644 index dce35dea8a..0000000000 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-ANY/scripts/params.py +++ /dev/null @@ -1,290 +0,0 @@ -""" -Licensed to the Apache Software Foundation (ASF) under one -or more contributor license agreements. See the NOTICE file -distributed with this work for additional information -regarding copyright ownership. 
The ASF licenses this file -to you under the Apache License, Version 2.0 (the -"License"); you may not use this file except in compliance -with the License. You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -""" - -import collections -import re -import os -import ast - -import ambari_simplejson as json # simplejson is much faster comparing to Python 2.6 json module and has the same functions set. - -from resource_management.libraries.script import Script -from resource_management.libraries.functions import default -from resource_management.libraries.functions import format -from resource_management.libraries.functions import conf_select -from resource_management.libraries.functions import stack_select -from resource_management.libraries.functions import format_jvm_option -from resource_management.libraries.functions.is_empty import is_empty -from resource_management.libraries.functions.version import format_stack_version -from resource_management.libraries.functions.expect import expect -from resource_management.libraries.functions import StackFeature -from resource_management.libraries.functions.stack_features import check_stack_feature -from resource_management.libraries.functions.stack_features import get_stack_feature_version -from resource_management.libraries.functions.get_architecture import get_architecture -from ambari_commons.constants import AMBARI_SUDO_BINARY -from resource_management.libraries.functions.namenode_ha_utils import get_properties_for_all_nameservices, namenode_federation_enabled - - -config = Script.get_config() -tmp_dir = Script.get_tmp_dir() - -stack_root = Script.get_stack_root() - -architecture = get_architecture() - -dfs_type = default("/clusterLevelParams/dfs_type", "") - -artifact_dir = format("{tmp_dir}/AMBARI-artifacts/") -jdk_name = default("/ambariLevelParams/jdk_name", None) -java_home = config['ambariLevelParams']['java_home'] -java_version = expect("/ambariLevelParams/java_version", int) -jdk_location = config['ambariLevelParams']['jdk_location'] - -hadoop_custom_extensions_enabled = default("/configurations/core-site/hadoop.custom-extensions.enabled", False) - -sudo = AMBARI_SUDO_BINARY - -ambari_server_hostname = config['ambariLevelParams']['ambari_server_host'] - -stack_version_unformatted = config['clusterLevelParams']['stack_version'] -stack_version_formatted = format_stack_version(stack_version_unformatted) - -upgrade_type = Script.get_upgrade_type(default("/commandParams/upgrade_type", "")) -version = default("/commandParams/version", None) -# Handle upgrade and downgrade -if (upgrade_type is not None) and version: - stack_version_formatted = format_stack_version(version) -ambari_java_home = default("/commandParams/ambari_java_home", None) -ambari_jdk_name = default("/commandParams/ambari_jdk_name", None) - -security_enabled = config['configurations']['cluster-env']['security_enabled'] -hdfs_user = config['configurations']['hadoop-env']['hdfs_user'] - -# Some datanode settings -dfs_dn_addr = default('/configurations/hdfs-site/dfs.datanode.address', None) -dfs_dn_http_addr = default('/configurations/hdfs-site/dfs.datanode.http.address', None) -dfs_dn_https_addr = 
default('/configurations/hdfs-site/dfs.datanode.https.address', None) -dfs_http_policy = default('/configurations/hdfs-site/dfs.http.policy', None) -secure_dn_ports_are_in_use = False - -def get_port(address): - """ - Extracts port from the address like 0.0.0.0:1019 - """ - if address is None: - return None - m = re.search(r'(?:http(?:s)?://)?([\w\d.]*):(\d{1,5})', address) - if m is not None: - return int(m.group(2)) - else: - return None - -def is_secure_port(port): - """ - Returns True if port is root-owned at *nix systems - """ - if port is not None: - return port < 1024 - else: - return False - -# upgrades would cause these directories to have a version instead of "current" -# which would cause a lot of problems when writing out hadoop-env.sh; instead -# force the use of "current" in the hook -hdfs_user_nofile_limit = default("/configurations/hadoop-env/hdfs_user_nofile_limit", "128000") -hadoop_home = stack_select.get_hadoop_dir("home") -hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec") -hadoop_lib_home = stack_select.get_hadoop_dir("lib") - -hadoop_dir = "/etc/hadoop" -hadoop_java_io_tmpdir = os.path.join(tmp_dir, "hadoop_java_io_tmpdir") -datanode_max_locked_memory = config['configurations']['hdfs-site']['dfs.datanode.max.locked.memory'] -is_datanode_max_locked_memory_set = not is_empty(config['configurations']['hdfs-site']['dfs.datanode.max.locked.memory']) - -mapreduce_libs_path = "/usr/hdp/current/hadoop-mapreduce-client/*" - -if not security_enabled: - hadoop_secure_dn_user = '""' -else: - dfs_dn_port = get_port(dfs_dn_addr) - dfs_dn_http_port = get_port(dfs_dn_http_addr) - dfs_dn_https_port = get_port(dfs_dn_https_addr) - # We try to avoid inability to start datanode as a plain user due to usage of root-owned ports - if dfs_http_policy == "HTTPS_ONLY": - secure_dn_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_https_port) - elif dfs_http_policy == "HTTP_AND_HTTPS": - secure_dn_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_http_port) or is_secure_port(dfs_dn_https_port) - else: # params.dfs_http_policy == "HTTP_ONLY" or not defined: - secure_dn_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_http_port) - if secure_dn_ports_are_in_use: - hadoop_secure_dn_user = hdfs_user - else: - hadoop_secure_dn_user = '""' - -#hadoop params -hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix'] -hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix'] -hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger'] - -jsvc_path = "/usr/lib/bigtop-utils" - -hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize'] -namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize'] -namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize'] -namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize'] -namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m") -namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m") - -jtnode_opt_newsize = "200m" -jtnode_opt_maxnewsize = "200m" -jtnode_heapsize = "1024m" -ttnode_heapsize = "1024m" - -dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize'] -nfsgateway_heapsize = config['configurations']['hadoop-env']['nfsgateway_heapsize'] -mapred_pid_dir_prefix = 
default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce") -mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce") -hadoop_env_sh_template = config['configurations']['hadoop-env']['content'] - -#users and groups -hbase_user = config['configurations']['hbase-env']['hbase_user'] -smoke_user = config['configurations']['cluster-env']['smokeuser'] -gmetad_user = config['configurations']['ganglia-env']["gmetad_user"] -gmond_user = config['configurations']['ganglia-env']["gmond_user"] -tez_user = config['configurations']['tez-env']["tez_user"] -oozie_user = config['configurations']['oozie-env']["oozie_user"] -falcon_user = config['configurations']['falcon-env']["falcon_user"] -ranger_user = config['configurations']['ranger-env']["ranger_user"] -zeppelin_user = config['configurations']['zeppelin-env']["zeppelin_user"] -zeppelin_group = config['configurations']['zeppelin-env']["zeppelin_group"] - -user_group = config['configurations']['cluster-env']['user_group'] - -ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_hosts", []) -namenode_hosts = default("/clusterHostInfo/namenode_hosts", []) -hdfs_client_hosts = default("/clusterHostInfo/hdfs_client_hosts", []) -hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", []) -oozie_servers = default("/clusterHostInfo/oozie_server", []) -falcon_server_hosts = default("/clusterHostInfo/falcon_server_hosts", []) -ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", []) -zeppelin_master_hosts = default("/clusterHostInfo/zeppelin_master_hosts", []) - -# get the correct version to use for checking stack features -version_for_stack_feature_checks = get_stack_feature_version(config) - - -has_namenode = len(namenode_hosts) > 0 -has_hdfs_clients = len(hdfs_client_hosts) > 0 -has_hdfs = has_hdfs_clients or has_namenode -has_ganglia_server = not len(ganglia_server_hosts) == 0 -has_tez = 'tez-site' in config['configurations'] -has_hbase_masters = not len(hbase_master_hosts) == 0 -has_oozie_server = not len(oozie_servers) == 0 -has_falcon_server_hosts = not len(falcon_server_hosts) == 0 -has_ranger_admin = not len(ranger_admin_hosts) == 0 -has_zeppelin_master = not len(zeppelin_master_hosts) == 0 -stack_supports_zk_security = check_stack_feature(StackFeature.SECURE_ZOOKEEPER, version_for_stack_feature_checks) - -hostname = config['agentLevelParams']['hostname'] -hdfs_site = config['configurations']['hdfs-site'] - -# HDFS High Availability properties -dfs_ha_enabled = False -dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.internal.nameservices', None) -if dfs_ha_nameservices is None: - dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.nameservices', None) - -# on stacks without any filesystem there is no hdfs-site -dfs_ha_namenode_ids_all_ns = get_properties_for_all_nameservices(hdfs_site, 'dfs.ha.namenodes') if 'hdfs-site' in config['configurations'] else {} -dfs_ha_automatic_failover_enabled = default("/configurations/hdfs-site/dfs.ha.automatic-failover.enabled", False) - -# Values for the current Host -namenode_id = None -namenode_rpc = None - -dfs_ha_namemodes_ids_list = [] -other_namenode_id = None - -for ns, dfs_ha_namenode_ids in dfs_ha_namenode_ids_all_ns.iteritems(): - found = False - if not is_empty(dfs_ha_namenode_ids): - dfs_ha_namemodes_ids_list = dfs_ha_namenode_ids.split(",") - dfs_ha_namenode_ids_array_len = len(dfs_ha_namemodes_ids_list) - if dfs_ha_namenode_ids_array_len > 1: - dfs_ha_enabled = True - 
if dfs_ha_enabled: - for nn_id in dfs_ha_namemodes_ids_list: - nn_host = config['configurations']['hdfs-site'][format('dfs.namenode.rpc-address.{ns}.{nn_id}')] - if hostname in nn_host: - namenode_id = nn_id - namenode_rpc = nn_host - found = True - # With HA enabled namenode_address is recomputed - namenode_address = format('hdfs://{ns}') - - # Calculate the namenode id of the other namenode. This is needed during RU to initiate an HA failover using ZKFC. - if namenode_id is not None and len(dfs_ha_namemodes_ids_list) == 2: - other_namenode_id = list(set(dfs_ha_namemodes_ids_list) - set([namenode_id]))[0] - - if found: - break - -if has_hdfs or dfs_type == 'HCFS': - hadoop_conf_dir = conf_select.get_hadoop_conf_dir() - hadoop_conf_secure_dir = os.path.join(hadoop_conf_dir, "secure") - -hbase_tmp_dir = "/tmp/hbase-hbase" - -proxyuser_group = default("/configurations/hadoop-env/proxyuser_group","users") -ranger_group = config['configurations']['ranger-env']['ranger_group'] -dfs_cluster_administrators_group = config['configurations']['hdfs-site']["dfs.cluster.administrators"] - -sysprep_skip_create_users_and_groups = default("/configurations/cluster-env/sysprep_skip_create_users_and_groups", False) -ignore_groupsusers_create = default("/configurations/cluster-env/ignore_groupsusers_create", False) -fetch_nonlocal_groups = config['configurations']['cluster-env']["fetch_nonlocal_groups"] - -smoke_user_dirs = format("/tmp/hadoop-{smoke_user},/tmp/hsperfdata_{smoke_user},/home/{smoke_user},/tmp/{smoke_user}") -if has_hbase_masters: - hbase_user_dirs = format("/home/{hbase_user},/tmp/{hbase_user},/usr/bin/{hbase_user},/var/log/{hbase_user},{hbase_tmp_dir}") -#repo params -repo_info = config['hostLevelParams']['repoInfo'] -service_repo_info = default("/hostLevelParams/service_repo_info",None) - -user_to_groups_dict = {} - -#Append new user-group mapping to the dict -try: - user_group_map = ast.literal_eval(config['clusterLevelParams']['user_groups']) - for key in user_group_map.iterkeys(): - user_to_groups_dict[key] = user_group_map[key] -except ValueError: - print('User Group mapping (user_group) is missing in the hostLevelParams') - -user_to_gid_dict = collections.defaultdict(lambda:user_group) - -user_list = json.loads(config['clusterLevelParams']['user_list']) -group_list = json.loads(config['clusterLevelParams']['group_list']) -host_sys_prepped = default("/ambariLevelParams/host_sys_prepped", False) - -tez_am_view_acls = config['configurations']['tez-site']["tez.am.view-acls"] -override_uid = str(default("/configurations/cluster-env/override_uid", "true")).lower() - -# if NN HA on secure clutser, access Zookeper securely -if stack_supports_zk_security and dfs_ha_enabled and security_enabled: - hadoop_zkfc_opts=format("-Dzookeeper.sasl.client=true -Dzookeeper.sasl.client.username=zookeeper -Djava.security.auth.login.config={hadoop_conf_secure_dir}/hdfs_jaas.conf -Dzookeeper.sasl.clientconfig=Client") diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-ANY/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-ANY/scripts/shared_initialization.py deleted file mode 100644 index ec9497fdaa..0000000000 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-ANY/scripts/shared_initialization.py +++ /dev/null @@ -1,273 +0,0 @@ -""" -Licensed to the Apache Software Foundation (ASF) under one -or more contributor license agreements. 
See the NOTICE file -distributed with this work for additional information -regarding copyright ownership. The ASF licenses this file -to you under the Apache License, Version 2.0 (the -"License"); you may not use this file except in compliance -with the License. You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -""" - -import os -import re -import getpass -import tempfile -from copy import copy -from resource_management.libraries.functions.version import compare_versions -from resource_management import * -from resource_management.core import shell - -def setup_users(): - """ - Creates users before cluster installation - """ - import params - - should_create_users_and_groups = False - if params.host_sys_prepped: - should_create_users_and_groups = not params.sysprep_skip_create_users_and_groups - else: - should_create_users_and_groups = not params.ignore_groupsusers_create - - if should_create_users_and_groups: - for group in params.group_list: - Group(group, - ) - - for user in params.user_list: - User(user, - uid = get_uid(user) if params.override_uid == "true" else None, - gid = params.user_to_gid_dict[user], - groups = params.user_to_groups_dict[user], - fetch_nonlocal_groups = params.fetch_nonlocal_groups, - ) - - if params.override_uid == "true": - set_uid(params.smoke_user, params.smoke_user_dirs) - else: - Logger.info('Skipping setting uid for smoke user as host is sys prepped') - else: - Logger.info('Skipping creation of User and Group as host is sys prepped or ignore_groupsusers_create flag is on') - pass - - - if params.has_hbase_masters: - Directory (params.hbase_tmp_dir, - owner = params.hbase_user, - mode=0775, - create_parents = True, - cd_access="a", - ) - - if params.override_uid == "true": - set_uid(params.hbase_user, params.hbase_user_dirs) - else: - Logger.info('Skipping setting uid for hbase user as host is sys prepped') - - if should_create_users_and_groups: - if params.has_hdfs: - create_dfs_cluster_admins() - if params.has_tez and params.stack_version_formatted != "" and compare_versions(params.stack_version_formatted, '2.3') >= 0: - create_tez_am_view_acls() - else: - Logger.info('Skipping setting dfs cluster admin and tez view acls as host is sys prepped') - -def create_dfs_cluster_admins(): - """ - dfs.cluster.administrators support format <comma-delimited list of usernames><space><comma-delimited list of group names> - """ - import params - - groups_list = create_users_and_groups(params.dfs_cluster_administrators_group) - - User(params.hdfs_user, - groups = params.user_to_groups_dict[params.hdfs_user] + groups_list, - fetch_nonlocal_groups = params.fetch_nonlocal_groups - ) - -def create_tez_am_view_acls(): - - """ - tez.am.view-acls support format <comma-delimited list of usernames><space><comma-delimited list of group names> - """ - import params - - if not params.tez_am_view_acls.startswith("*"): - create_users_and_groups(params.tez_am_view_acls) - -def create_users_and_groups(user_and_groups): - - import params - - parts = re.split('\s+', user_and_groups) - if len(parts) == 1: - parts.append("") - - users_list = parts[0].strip(",").split(",") if parts[0] else [] - groups_list = parts[1].strip(",").split(",") if 
parts[1] else [] - - # skip creating groups and users if * is provided as value. - users_list = filter(lambda x: x != '*' , users_list) - groups_list = filter(lambda x: x != '*' , groups_list) - - if users_list: - User(users_list, - fetch_nonlocal_groups = params.fetch_nonlocal_groups - ) - - if groups_list: - Group(copy(groups_list), - ) - return groups_list - -def set_uid(user, user_dirs): - """ - user_dirs - comma separated directories - """ - import params - - File(format("{tmp_dir}/changeUid.sh"), - content=StaticFile("changeToSecureUid.sh"), - mode=0555) - ignore_groupsusers_create_str = str(params.ignore_groupsusers_create).lower() - uid = get_uid(user, return_existing=True) - Execute(format("{tmp_dir}/changeUid.sh {user} {user_dirs} {new_uid}", new_uid=0 if uid is None else uid), - not_if = format("(test $(id -u {user}) -gt 1000) || ({ignore_groupsusers_create_str})")) - -def get_uid(user, return_existing=False): - """ - Tries to get UID for username. It will try to find UID in custom properties in *cluster_env* and, if *return_existing=True*, - it will try to return UID of existing *user*. - - :param user: username to get UID for - :param return_existing: return UID for existing user - :return: - """ - import params - user_str = str(user) + "_uid" - service_env = [ serviceEnv for serviceEnv in params.config['configurations'] if user_str in params.config['configurations'][serviceEnv]] - - if service_env and params.config['configurations'][service_env[0]][user_str]: - service_env_str = str(service_env[0]) - uid = params.config['configurations'][service_env_str][user_str] - if len(service_env) > 1: - Logger.warning("Multiple values found for %s, using %s" % (user_str, uid)) - return uid - else: - if return_existing: - # pick up existing UID or try to find available UID in /etc/passwd, see changeToSecureUid.sh for more info - if user == params.smoke_user: - return None - File(format("{tmp_dir}/changeUid.sh"), - content=StaticFile("changeToSecureUid.sh"), - mode=0555) - code, newUid = shell.call(format("{tmp_dir}/changeUid.sh {user}")) - return int(newUid) - else: - # do not return UID for existing user, used in User resource call to let OS to choose UID for us - return None - -def setup_hadoop_env(): - import params - stackversion = params.stack_version_unformatted - Logger.info("FS Type: {0}".format(params.dfs_type)) - if params.has_hdfs or stackversion.find('Gluster') >= 0 or params.dfs_type == 'HCFS': - if params.security_enabled: - tc_owner = "root" - else: - tc_owner = params.hdfs_user - - # create /etc/hadoop - Directory(params.hadoop_dir, mode=0755) - - # write out hadoop-env.sh, but only if the directory exists - if os.path.exists(params.hadoop_conf_dir): - File(os.path.join(params.hadoop_conf_dir, 'hadoop-env.sh'), owner=tc_owner, - group=params.user_group, - content=InlineTemplate(params.hadoop_env_sh_template)) - - # Create tmp dir for java.io.tmpdir - # Handle a situation when /tmp is set to noexec - Directory(params.hadoop_java_io_tmpdir, - owner=params.hdfs_user, - group=params.user_group, - mode=01777 - ) - -def setup_java(): - """ - Install jdk using specific params. - Install ambari jdk as well if the stack and ambari jdk are different. 
- """ - import params - __setup_java(custom_java_home=params.java_home, custom_jdk_name=params.jdk_name) - if params.ambari_java_home and params.ambari_java_home != params.java_home: - __setup_java(custom_java_home=params.ambari_java_home, custom_jdk_name=params.ambari_jdk_name) - -def __setup_java(custom_java_home, custom_jdk_name): - """ - Installs jdk using specific params, that comes from ambari-server - """ - import params - java_exec = format("{custom_java_home}/bin/java") - - if not os.path.isfile(java_exec): - if not params.jdk_name: # if custom jdk is used. - raise Fail(format("Unable to access {java_exec}. Confirm you have copied jdk to this host.")) - - jdk_curl_target = format("{tmp_dir}/{custom_jdk_name}") - java_dir = os.path.dirname(params.java_home) - - Directory(params.artifact_dir, - create_parents = True, - ) - - File(jdk_curl_target, - content = DownloadSource(format("{jdk_location}/{custom_jdk_name}")), - not_if = format("test -f {jdk_curl_target}") - ) - - File(jdk_curl_target, - mode = 0755, - ) - - tmp_java_dir = tempfile.mkdtemp(prefix="jdk_tmp_", dir=params.tmp_dir) - - try: - if params.jdk_name.endswith(".bin"): - chmod_cmd = ("chmod", "+x", jdk_curl_target) - install_cmd = format("cd {tmp_java_dir} && echo A | {jdk_curl_target} -noregister && {sudo} cp -rp {tmp_java_dir}/* {java_dir}") - elif params.jdk_name.endswith(".gz"): - chmod_cmd = ("chmod","a+x", java_dir) - install_cmd = format("cd {tmp_java_dir} && tar -xf {jdk_curl_target} && {sudo} cp -rp {tmp_java_dir}/* {java_dir}") - - Directory(java_dir - ) - - Execute(chmod_cmd, - sudo = True, - ) - - Execute(install_cmd, - ) - - finally: - Directory(tmp_java_dir, action="delete") - - File(format("{custom_java_home}/bin/java"), - mode=0755, - cd_access="a", - ) - Execute(('chmod', '-R', '755', params.java_home), - sudo = True, - ) - diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-INSTALL/scripts/hook.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-INSTALL/scripts/hook.py deleted file mode 100644 index c4709657f7..0000000000 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-INSTALL/scripts/hook.py +++ /dev/null @@ -1,37 +0,0 @@ -""" -Licensed to the Apache Software Foundation (ASF) under one -or more contributor license agreements. See the NOTICE file -distributed with this work for additional information -regarding copyright ownership. The ASF licenses this file -to you under the Apache License, Version 2.0 (the -"License"); you may not use this file except in compliance -with the License. You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
- -""" -from resource_management import Hook -from shared_initialization import install_packages -from repo_initialization import install_repos - - -class BeforeInstallHook(Hook): - - def hook(self, env): - import params - - self.run_custom_hook('before-ANY') - env.set_params(params) - - install_repos() - install_packages() - - -if __name__ == "__main__": - BeforeInstallHook().execute() diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-INSTALL/scripts/params.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-INSTALL/scripts/params.py deleted file mode 100644 index 34b4f37597..0000000000 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-INSTALL/scripts/params.py +++ /dev/null @@ -1,114 +0,0 @@ -""" -Licensed to the Apache Software Foundation (ASF) under one -or more contributor license agreements. See the NOTICE file -distributed with this work for additional information -regarding copyright ownership. The ASF licenses this file -to you under the Apache License, Version 2.0 (the -"License"); you may not use this file except in compliance -with the License. You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -""" - -from ambari_commons.constants import AMBARI_SUDO_BINARY -from resource_management.libraries.functions.version import format_stack_version, compare_versions -from resource_management.core.system import System -from resource_management.libraries.script.script import Script -from resource_management.libraries.functions import default, format -from resource_management.libraries.functions.expect import expect - -config = Script.get_config() -tmp_dir = Script.get_tmp_dir() -sudo = AMBARI_SUDO_BINARY - -stack_version_unformatted = config['clusterLevelParams']['stack_version'] -agent_stack_retry_on_unavailability = config['ambariLevelParams']['agent_stack_retry_on_unavailability'] -agent_stack_retry_count = expect("/ambariLevelParams/agent_stack_retry_count", int) -stack_version_formatted = format_stack_version(stack_version_unformatted) - -#users and groups -hbase_user = config['configurations']['hbase-env']['hbase_user'] -smoke_user = config['configurations']['cluster-env']['smokeuser'] -gmetad_user = config['configurations']['ganglia-env']["gmetad_user"] -gmond_user = config['configurations']['ganglia-env']["gmond_user"] -tez_user = config['configurations']['tez-env']["tez_user"] - -user_group = config['configurations']['cluster-env']['user_group'] -proxyuser_group = default("/configurations/hadoop-env/proxyuser_group","users") - -hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix'] - -# repo templates -repo_rhel_suse = config['configurations']['cluster-env']['repo_suse_rhel_template'] -repo_ubuntu = config['configurations']['cluster-env']['repo_ubuntu_template'] - -#hosts -hostname = config['agentLevelParams']['hostname'] -ambari_server_hostname = config['ambariLevelParams']['ambari_server_host'] -rm_host = default("/clusterHostInfo/resourcemanager_hosts", []) -slave_hosts = default("/clusterHostInfo/datanode_hosts", []) -oozie_servers = default("/clusterHostInfo/oozie_server", []) -hcat_server_hosts = 
default("/clusterHostInfo/webhcat_server_hosts", []) -hive_server_host = default("/clusterHostInfo/hive_server_hosts", []) -hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", []) -hs_host = default("/clusterHostInfo/historyserver_hosts", []) -jtnode_host = default("/clusterHostInfo/jtnode_hosts", []) -namenode_hosts = default("/clusterHostInfo/namenode_hosts", []) -zk_hosts = default("/clusterHostInfo/zookeeper_server_hosts", []) -ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_hosts", []) -storm_server_hosts = default("/clusterHostInfo/nimbus_hosts", []) -falcon_host = default('/clusterHostInfo/falcon_server_hosts', []) - -has_namenode = len(namenode_hosts) > 0 -has_hs = not len(hs_host) == 0 -has_resourcemanager = not len(rm_host) == 0 -has_slaves = not len(slave_hosts) == 0 -has_oozie_server = not len(oozie_servers) == 0 -has_hcat_server_host = not len(hcat_server_hosts) == 0 -has_hive_server_host = not len(hive_server_host) == 0 -has_hbase_masters = not len(hbase_master_hosts) == 0 -has_zk_host = not len(zk_hosts) == 0 -has_ganglia_server = not len(ganglia_server_hosts) == 0 -has_storm_server = not len(storm_server_hosts) == 0 -has_falcon_server = not len(falcon_host) == 0 -has_tez = 'tez-site' in config['configurations'] - -is_namenode_master = hostname in namenode_hosts -is_jtnode_master = hostname in jtnode_host -is_rmnode_master = hostname in rm_host -is_hsnode_master = hostname in hs_host -is_hbase_master = hostname in hbase_master_hosts -is_slave = hostname in slave_hosts -if has_ganglia_server: - ganglia_server_host = ganglia_server_hosts[0] - -hbase_tmp_dir = "/tmp/hbase-hbase" - -#security params -security_enabled = config['configurations']['cluster-env']['security_enabled'] - -#java params -java_home = config['ambariLevelParams']['java_home'] -artifact_dir = format("{tmp_dir}/AMBARI-artifacts/") -jdk_name = default("/ambariLevelParams/jdk_name", None) # None when jdk is already installed by user -jce_policy_zip = default("/ambariLevelParams/jce_name", None) # None when jdk is already installed by user -jce_location = config['ambariLevelParams']['jdk_location'] -jdk_location = config['ambariLevelParams']['jdk_location'] -ignore_groupsusers_create = default("/configurations/cluster-env/ignore_groupsusers_create", False) -host_sys_prepped = default("/ambariLevelParams/host_sys_prepped", False) - -smoke_user_dirs = format("/tmp/hadoop-{smoke_user},/tmp/hsperfdata_{smoke_user},/home/{smoke_user},/tmp/{smoke_user}") -if has_hbase_masters: - hbase_user_dirs = format("/home/{hbase_user},/tmp/{hbase_user},/usr/bin/{hbase_user},/var/log/{hbase_user},{hbase_tmp_dir}") -#repo params -repo_info = config['hostLevelParams']['repoInfo'] -service_repo_info = default("/hostLevelParams/service_repo_info",None) - -repo_file = default("/repositoryFile", None) diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-INSTALL/scripts/repo_initialization.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-INSTALL/scripts/repo_initialization.py deleted file mode 100644 index f6f2a1255c..0000000000 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-INSTALL/scripts/repo_initialization.py +++ /dev/null @@ -1,76 +0,0 @@ -""" -Licensed to the Apache Software Foundation (ASF) under one -or more contributor license agreements. See the NOTICE file -distributed with this work for additional information -regarding copyright ownership. 
The ASF licenses this file -to you under the Apache License, Version 2.0 (the -"License"); you may not use this file except in compliance -with the License. You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -""" - -from ambari_commons.os_check import OSCheck -from resource_management.libraries.resources.repository import Repository -from resource_management.libraries.functions.repository_util import CommandRepository, UBUNTU_REPO_COMPONENTS_POSTFIX -from resource_management.libraries.script.script import Script -from resource_management.core.logger import Logger -import ambari_simplejson as json - - -def _alter_repo(action, repo_dicts, repo_template): - """ - @param action: "delete" or "create" - @param repo_dicts: e.g. "[{\"baseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\",\"osType\":\"centos6\",\"repoId\":\"HDP-2.0._\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\"}]" - """ - if not isinstance(repo_dicts, list): - repo_dicts = [repo_dicts] - - if 0 == len(repo_dicts): - Logger.info("Repository list is empty. Ambari may not be managing the repositories.") - else: - Logger.info("Initializing {0} repositories".format(str(len(repo_dicts)))) - - for repo in repo_dicts: - if not 'baseUrl' in repo: - repo['baseUrl'] = None - if not 'mirrorsList' in repo: - repo['mirrorsList'] = None - - ubuntu_components = [ repo['distribution'] if 'distribution' in repo and repo['distribution'] else repo['repoName'] ] \ - + [repo['components'].replace(",", " ") if 'components' in repo and repo['components'] else UBUNTU_REPO_COMPONENTS_POSTFIX] - - Repository(repo['repoId'], - action = "prepare", - base_url = repo['baseUrl'], - mirror_list = repo['mirrorsList'], - repo_file_name = repo['repoName'], - repo_template = repo_template, - components = ubuntu_components) # ubuntu specific - - Repository(None, action = "create") - - -def install_repos(): - import params - if params.host_sys_prepped: - return - - # use this newer way of specifying repositories, if available - if params.repo_file is not None: - Script.repository_util.create_repo_files() - return - - template = params.repo_rhel_suse if OSCheck.is_suse_family() or OSCheck.is_redhat_family() else params.repo_ubuntu - - _alter_repo("create", params.repo_info, template) - - if params.service_repo_info: - _alter_repo("create", params.service_repo_info, template) diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-INSTALL/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-INSTALL/scripts/shared_initialization.py deleted file mode 100644 index f4a5e15c2e..0000000000 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-INSTALL/scripts/shared_initialization.py +++ /dev/null @@ -1,37 +0,0 @@ -""" -Licensed to the Apache Software Foundation (ASF) under one -or more contributor license agreements. See the NOTICE file -distributed with this work for additional information -regarding copyright ownership. 
The ASF licenses this file -to you under the Apache License, Version 2.0 (the -"License"); you may not use this file except in compliance -with the License. You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -""" - -import os - -from resource_management.libraries.functions import stack_tools -from resource_management.libraries.functions.version import compare_versions -from resource_management.core.resources.packaging import Package - -def install_packages(): - import params - if params.host_sys_prepped: - return - - packages = ['unzip', 'curl'] - # if params.stack_version_formatted != "" and compare_versions(params.stack_version_formatted, '2.2') >= 0: - # stack_selector_package = stack_tools.get_stack_tool_package(stack_tools.STACK_SELECTOR_NAME) - # packages.append(stack_selector_package) - Package(packages, - retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability, - retry_count=params.agent_stack_retry_count) diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-RESTART/scripts/hook.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-RESTART/scripts/hook.py deleted file mode 100644 index f7f4f1c975..0000000000 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-RESTART/scripts/hook.py +++ /dev/null @@ -1,30 +0,0 @@ -""" -Licensed to the Apache Software Foundation (ASF) under one -or more contributor license agreements. See the NOTICE file -distributed with this work for additional information -regarding copyright ownership. The ASF licenses this file -to you under the Apache License, Version 2.0 (the -"License"); you may not use this file except in compliance -with the License. You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -""" -from resource_management import Hook - - -class BeforeRestartHook(Hook): - - def hook(self, env): - self.run_custom_hook('before-START') - - -if __name__ == "__main__": - BeforeRestartHook().execute() - diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-SET_KEYTAB/scripts/hook.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-SET_KEYTAB/scripts/hook.py deleted file mode 100644 index 289475bf5c..0000000000 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-SET_KEYTAB/scripts/hook.py +++ /dev/null @@ -1,39 +0,0 @@ -""" -Licensed to the Apache Software Foundation (ASF) under one -or more contributor license agreements. See the NOTICE file -distributed with this work for additional information -regarding copyright ownership. The ASF licenses this file -to you under the Apache License, Version 2.0 (the -"License"); you may not use this file except in compliance -with the License. 
You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -""" -from resource_management import Hook - - -class BeforeSetKeytabHook(Hook): - - def hook(self, env): - """ - This will invoke the before-ANY hook which contains all of the user and group creation logic. - Keytab regeneration requires all users are already created, which is usually done by the - before-INSTALL hook. However, if the keytab regeneration is executed as part of an upgrade, - then the before-INSTALL hook never ran. - - :param env: - :return: - """ - self.run_custom_hook('before-ANY') - - -if __name__ == "__main__": - BeforeSetKeytabHook().execute() - diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-START/files/checkForFormat.sh b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-START/files/checkForFormat.sh deleted file mode 100644 index 68aa96da8b..0000000000 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-START/files/checkForFormat.sh +++ /dev/null @@ -1,65 +0,0 @@ -#!/usr/bin/env bash -# -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# -# - -export hdfs_user=$1 -shift -export conf_dir=$1 -shift -export bin_dir=$1 -shift -export mark_dir=$1 -shift -export name_dirs=$* - -export EXIT_CODE=0 -export command="namenode -format" -export list_of_non_empty_dirs="" - -mark_file=/var/run/hadoop/hdfs/namenode-formatted -if [[ -f ${mark_file} ]] ; then - /var/lib/ambari-agent/ambari-sudo.sh rm -f ${mark_file} - /var/lib/ambari-agent/ambari-sudo.sh mkdir -p ${mark_dir} -fi - -if [[ ! -d $mark_dir ]] ; then - for dir in `echo $name_dirs | tr ',' ' '` ; do - echo "NameNode Dirname = $dir" - cmd="ls $dir | wc -l | grep -q ^0$" - eval $cmd - if [[ $? -ne 0 ]] ; then - (( EXIT_CODE = $EXIT_CODE + 1 )) - list_of_non_empty_dirs="$list_of_non_empty_dirs $dir" - fi - done - - if [[ $EXIT_CODE == 0 ]] ; then - /var/lib/ambari-agent/ambari-sudo.sh su ${hdfs_user} - -s /bin/bash -c "export PATH=$PATH:$bin_dir ; yes Y | hdfs --config ${conf_dir} ${command}" - (( EXIT_CODE = $EXIT_CODE | $? )) - else - echo "ERROR: Namenode directory(s) is non empty. Will not format the namenode. List of non-empty namenode dirs ${list_of_non_empty_dirs}" - fi -else - echo "${mark_dir} exists. 
Namenode DFS already formatted" -fi - -exit $EXIT_CODE - diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-START/files/fast-hdfs-resource.jar b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-START/files/fast-hdfs-resource.jar deleted file mode 100644 index b8f633fd24..0000000000 Binary files a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-START/files/fast-hdfs-resource.jar and /dev/null differ diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-START/files/task-log4j.properties b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-START/files/task-log4j.properties deleted file mode 100644 index 7e12962b29..0000000000 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-START/files/task-log4j.properties +++ /dev/null @@ -1,134 +0,0 @@ -# -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# -# - - -# Define some default values that can be overridden by system properties -hadoop.root.logger=INFO,console -hadoop.log.dir=. -hadoop.log.file=hadoop.log - -# -# Job Summary Appender -# -# Use following logger to send summary to separate file defined by -# hadoop.mapreduce.jobsummary.log.file rolled daily: -# hadoop.mapreduce.jobsummary.logger=INFO,JSA -# -hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger} -hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log - -# Define the root logger to the system property "hadoop.root.logger". 
-log4j.rootLogger=${hadoop.root.logger}, EventCounter - -# Logging Threshold -log4j.threshhold=ALL - -# -# Daily Rolling File Appender -# - -log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender -log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file} - -# Rollver at midnight -log4j.appender.DRFA.DatePattern=.yyyy-MM-dd - -# 30-day backup -#log4j.appender.DRFA.MaxBackupIndex=30 -log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout - -# Pattern format: Date LogLevel LoggerName LogMessage -log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n -# Debugging Pattern format -#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n - - -# -# console -# Add "console" to rootlogger above if you want to use this -# - -log4j.appender.console=org.apache.log4j.ConsoleAppender -log4j.appender.console.target=System.err -log4j.appender.console.layout=org.apache.log4j.PatternLayout -log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n - -# -# TaskLog Appender -# - -#Default values -hadoop.tasklog.taskid=null -hadoop.tasklog.iscleanup=false -hadoop.tasklog.noKeepSplits=4 -hadoop.tasklog.totalLogFileSize=100 -hadoop.tasklog.purgeLogSplits=true -hadoop.tasklog.logsRetainHours=12 - -log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender -log4j.appender.TLA.taskId=${hadoop.tasklog.taskid} -log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup} -log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize} - -log4j.appender.TLA.layout=org.apache.log4j.PatternLayout -log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n - -# -# Rolling File Appender -# - -#log4j.appender.RFA=org.apache.log4j.RollingFileAppender -#log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file} - -# Logfile size and and 30-day backups -#log4j.appender.RFA.MaxFileSize=1MB -#log4j.appender.RFA.MaxBackupIndex=30 - -#log4j.appender.RFA.layout=org.apache.log4j.PatternLayout -#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n -#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n - - -# Custom Logging levels - -hadoop.metrics.log.level=INFO -#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG -#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG -#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG -log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level} - -# Jets3t library -log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR - -# -# Null Appender -# Trap security logger on the hadoop client side -# -log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender - -# -# Event Counter Appender -# Sends counts of logging messages at different severity levels to Hadoop Metrics. -# -log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter - -# Removes "deprecated" messages -log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-START/files/topology_script.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-START/files/topology_script.py deleted file mode 100644 index 0f7a55c8ab..0000000000 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-START/files/topology_script.py +++ /dev/null @@ -1,66 +0,0 @@ -#!/usr/bin/env python -''' -Licensed to the Apache Software Foundation (ASF) under one -or more contributor license agreements. 
See the NOTICE file -distributed with this work for additional information -regarding copyright ownership. The ASF licenses this file -to you under the Apache License, Version 2.0 (the -"License"); you may not use this file except in compliance -with the License. You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -''' - -import sys, os -from string import join -import ConfigParser - - -DEFAULT_RACK = "/default-rack" -DATA_FILE_NAME = os.path.dirname(os.path.abspath(__file__)) + "/topology_mappings.data" -SECTION_NAME = "network_topology" - -class TopologyScript(): - - def load_rack_map(self): - try: - #RACK_MAP contains both host name vs rack and ip vs rack mappings - mappings = ConfigParser.ConfigParser() - mappings.read(DATA_FILE_NAME) - return dict(mappings.items(SECTION_NAME)) - except ConfigParser.NoSectionError: - return {} - - def get_racks(self, rack_map, args): - if len(args) == 1: - return DEFAULT_RACK - else: - return join([self.lookup_by_hostname_or_ip(input_argument, rack_map) for input_argument in args[1:]],) - - def lookup_by_hostname_or_ip(self, hostname_or_ip, rack_map): - #try looking up by hostname - rack = rack_map.get(hostname_or_ip) - if rack is not None: - return rack - #try looking up by ip - rack = rack_map.get(self.extract_ip(hostname_or_ip)) - #try by localhost since hadoop could be passing in 127.0.0.1 which might not be mapped - return rack if rack is not None else rack_map.get("localhost.localdomain", DEFAULT_RACK) - - #strips out port and slashes in case hadoop passes in something like 127.0.0.1/127.0.0.1:50010 - def extract_ip(self, container_string): - return container_string.split("/")[0].split(":")[0] - - def execute(self, args): - rack_map = self.load_rack_map() - rack = self.get_racks(rack_map, args) - print rack - -if __name__ == "__main__": - TopologyScript().execute(sys.argv) diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-START/scripts/custom_extensions.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-START/scripts/custom_extensions.py deleted file mode 100644 index 04299ba68d..0000000000 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-START/scripts/custom_extensions.py +++ /dev/null @@ -1,173 +0,0 @@ -""" -Licensed to the Apache Software Foundation (ASF) under one -or more contributor license agreements. See the NOTICE file -distributed with this work for additional information -regarding copyright ownership. The ASF licenses this file -to you under the Apache License, Version 2.0 (the -"License"); you may not use this file except in compliance -with the License. You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
- -""" - -import os - -from resource_management.core.resources import Directory -from resource_management.core.resources import Execute -from resource_management.libraries.functions import default -from resource_management.libraries.script.script import Script -from resource_management.libraries.functions import format - - -DEFAULT_HADOOP_HDFS_EXTENSION_DIR = "/hdp/ext/{0}/hadoop" -DEFAULT_HADOOP_HIVE_EXTENSION_DIR = "/hdp/ext/{0}/hive" -DEFAULT_HADOOP_HBASE_EXTENSION_DIR = "/hdp/ext/{0}/hbase" - -def setup_extensions(): - """ - The goal of this method is to distribute extensions (for example jar files) from - HDFS (/hdp/ext/{major_stack_version}/{service_name}) to all nodes which contain related - components of service (YARN, HIVE or HBASE). Extensions should be added to HDFS by - user manually. - """ - - import params - - # Hadoop Custom extensions - hadoop_custom_extensions_enabled = default("/configurations/core-site/hadoop.custom-extensions.enabled", False) - hadoop_custom_extensions_services = default("/configurations/core-site/hadoop.custom-extensions.services", "") - hadoop_custom_extensions_owner = default("/configurations/core-site/hadoop.custom-extensions.owner", params.hdfs_user) - hadoop_custom_extensions_hdfs_dir = get_config_formatted_value(default("/configurations/core-site/hadoop.custom-extensions.root", - DEFAULT_HADOOP_HDFS_EXTENSION_DIR.format(params.major_stack_version))) - hadoop_custom_extensions_services = [ service.strip().upper() for service in hadoop_custom_extensions_services.split(",") ] - hadoop_custom_extensions_services.append("YARN") - - hadoop_custom_extensions_local_dir = "{0}/current/ext/hadoop".format(Script.get_stack_root()) - - if params.current_service in hadoop_custom_extensions_services: - clean_extensions(hadoop_custom_extensions_local_dir) - if hadoop_custom_extensions_enabled: - download_extensions(hadoop_custom_extensions_owner, params.user_group, - hadoop_custom_extensions_hdfs_dir, - hadoop_custom_extensions_local_dir) - - setup_extensions_hive() - - hbase_custom_extensions_services = [] - hbase_custom_extensions_services.append("HBASE") - if params.current_service in hbase_custom_extensions_services: - setup_hbase_extensions() - - -def setup_hbase_extensions(): - import params - - # HBase Custom extensions - hbase_custom_extensions_enabled = default("/configurations/hbase-site/hbase.custom-extensions.enabled", False) - hbase_custom_extensions_owner = default("/configurations/hbase-site/hbase.custom-extensions.owner", params.hdfs_user) - hbase_custom_extensions_hdfs_dir = get_config_formatted_value(default("/configurations/hbase-site/hbase.custom-extensions.root", - DEFAULT_HADOOP_HBASE_EXTENSION_DIR.format(params.major_stack_version))) - hbase_custom_extensions_local_dir = "{0}/current/ext/hbase".format(Script.get_stack_root()) - - impacted_components = ['HBASE_MASTER', 'HBASE_REGIONSERVER', 'PHOENIX_QUERY_SERVER']; - role = params.config.get('role','') - - if role in impacted_components: - clean_extensions(hbase_custom_extensions_local_dir) - if hbase_custom_extensions_enabled: - download_extensions(hbase_custom_extensions_owner, params.user_group, - hbase_custom_extensions_hdfs_dir, - hbase_custom_extensions_local_dir) - - -def setup_extensions_hive(): - import params - - hive_custom_extensions_enabled = default("/configurations/hive-site/hive.custom-extensions.enabled", False) - hive_custom_extensions_owner = default("/configurations/hive-site/hive.custom-extensions.owner", params.hdfs_user) - hive_custom_extensions_hdfs_dir = 
DEFAULT_HADOOP_HIVE_EXTENSION_DIR.format(params.major_stack_version) - - hive_custom_extensions_local_dir = "{0}/current/ext/hive".format(Script.get_stack_root()) - - impacted_components = ['HIVE_SERVER', 'HIVE_CLIENT']; - role = params.config.get('role','') - - # Run copying for HIVE_SERVER and HIVE_CLIENT - if params.current_service == 'HIVE' and role in impacted_components: - clean_extensions(hive_custom_extensions_local_dir) - if hive_custom_extensions_enabled: - download_extensions(hive_custom_extensions_owner, params.user_group, - hive_custom_extensions_hdfs_dir, - hive_custom_extensions_local_dir) - -def download_extensions(owner_user, owner_group, hdfs_source_dir, local_target_dir): - """ - :param owner_user: user owner of the HDFS directory - :param owner_group: group owner of the HDFS directory - :param hdfs_source_dir: the HDFS directory from where the files are being pull - :param local_target_dir: the location of where to download the files - :return: Will return True if successful, otherwise, False. - """ - import params - - if not os.path.isdir(local_target_dir): - extensions_tmp_dir=format("{tmp_dir}/custom_extensions") - Directory(local_target_dir, - owner="root", - mode=0755, - group="root", - create_parents=True) - - params.HdfsResource(hdfs_source_dir, - type="directory", - action="create_on_execute", - owner=owner_user, - group=owner_group, - mode=0755) - - Directory(extensions_tmp_dir, - owner=params.hdfs_user, - mode=0755, - create_parents=True) - - # copy from hdfs to /tmp - params.HdfsResource(extensions_tmp_dir, - type="directory", - action="download_on_execute", - source=hdfs_source_dir, - user=params.hdfs_user, - mode=0644, - replace_existing_files=True) - - # Execute command is not quoting correctly. - cmd = format("{sudo} mv {extensions_tmp_dir}/* {local_target_dir}") - only_if_cmd = "ls -d {extensions_tmp_dir}/*".format(extensions_tmp_dir=extensions_tmp_dir) - Execute(cmd, only_if=only_if_cmd) - - only_if_local = 'ls -d "{local_target_dir}"'.format(local_target_dir=local_target_dir) - Execute(("chown", "-R", "root:root", local_target_dir), - sudo=True, - only_if=only_if_local) - - params.HdfsResource(None,action="execute") - return True - -def clean_extensions(local_dir): - """ - :param local_dir: The local directory where the extensions are stored. - :return: Will return True if successful, otherwise, False. - """ - if os.path.isdir(local_dir): - Directory(local_dir, - action="delete") - return True - -def get_config_formatted_value(property_value): - return format(property_value.replace("{{", "{").replace("}}", "}")) diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-START/scripts/hook.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-START/scripts/hook.py deleted file mode 100644 index 2f68cb14fb..0000000000 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-START/scripts/hook.py +++ /dev/null @@ -1,43 +0,0 @@ -""" -Licensed to the Apache Software Foundation (ASF) under one -or more contributor license agreements. See the NOTICE file -distributed with this work for additional information -regarding copyright ownership. The ASF licenses this file -to you under the Apache License, Version 2.0 (the -"License"); you may not use this file except in compliance -with the License. 
You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -""" -from rack_awareness import create_topology_script_and_mapping -from shared_initialization import setup_hadoop, setup_configs, create_javahome_symlink, setup_unlimited_key_jce_policy, \ - Hook -from custom_extensions import setup_extensions - - -class BeforeStartHook(Hook): - - def hook(self, env): - import params - - self.run_custom_hook('before-ANY') - env.set_params(params) - - setup_hadoop() - setup_configs() - create_javahome_symlink() - create_topology_script_and_mapping() - setup_unlimited_key_jce_policy() - if params.stack_supports_hadoop_custom_extensions: - setup_extensions() - - -if __name__ == "__main__": - BeforeStartHook().execute() diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-START/scripts/params.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-START/scripts/params.py deleted file mode 100644 index faccce39e1..0000000000 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-START/scripts/params.py +++ /dev/null @@ -1,382 +0,0 @@ -""" -Licensed to the Apache Software Foundation (ASF) under one -or more contributor license agreements. See the NOTICE file -distributed with this work for additional information -regarding copyright ownership. The ASF licenses this file -to you under the Apache License, Version 2.0 (the -"License"); you may not use this file except in compliance -with the License. You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
- -""" - -import os - -from resource_management.libraries.functions import conf_select -from resource_management.libraries.functions import stack_select -from resource_management.libraries.functions import default -from resource_management.libraries.functions import format_jvm_option -from resource_management.libraries.functions import format -from resource_management.libraries.functions.version import format_stack_version, compare_versions, get_major_version -from ambari_commons.os_check import OSCheck -from resource_management.libraries.script.script import Script -from resource_management.libraries.functions import get_kinit_path -from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources -from resource_management.libraries.resources.hdfs_resource import HdfsResource -from resource_management.libraries.functions.stack_features import check_stack_feature -from resource_management.libraries.functions.stack_features import get_stack_feature_version -from resource_management.libraries.functions import StackFeature -from ambari_commons.constants import AMBARI_SUDO_BINARY - -config = Script.get_config() -tmp_dir = Script.get_tmp_dir() -artifact_dir = tmp_dir + "/AMBARI-artifacts" - -version_for_stack_feature_checks = get_stack_feature_version(config) -stack_supports_hadoop_custom_extensions = check_stack_feature(StackFeature.HADOOP_CUSTOM_EXTENSIONS, version_for_stack_feature_checks) - -sudo = AMBARI_SUDO_BINARY - -# Global flag enabling or disabling the sysprep feature -host_sys_prepped = default("/ambariLevelParams/host_sys_prepped", False) - -# Whether to skip copying fast-hdfs-resource.jar to /var/lib/ambari-agent/lib/ -# This is required if tarballs are going to be copied to HDFS, so set to False -sysprep_skip_copy_fast_jar_hdfs = host_sys_prepped and default("/configurations/cluster-env/sysprep_skip_copy_fast_jar_hdfs", False) - -# Whether to skip setting up the unlimited key JCE policy -sysprep_skip_setup_jce = host_sys_prepped and default("/configurations/cluster-env/sysprep_skip_setup_jce", False) - -stack_version_unformatted = config['clusterLevelParams']['stack_version'] -stack_version_formatted = format_stack_version(stack_version_unformatted) -major_stack_version = get_major_version(stack_version_formatted) - -dfs_type = default("/clusterLevelParams/dfs_type", "") -hadoop_conf_dir = "/etc/hadoop/conf" -component_list = default("/localComponents", []) - -hdfs_tmp_dir = default("/configurations/hadoop-env/hdfs_tmp_dir", "/tmp") - -hadoop_metrics2_properties_content = None -if 'hadoop-metrics2.properties' in config['configurations']: - hadoop_metrics2_properties_content = config['configurations']['hadoop-metrics2.properties']['content'] - -hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec") -hadoop_lib_home = stack_select.get_hadoop_dir("lib") -hadoop_bin = stack_select.get_hadoop_dir("sbin") - -mapreduce_libs_path = "/usr/hdp/current/hadoop-mapreduce-client/*" -hadoop_home = stack_select.get_hadoop_dir("home") -create_lib_snappy_symlinks = False - -current_service = config['serviceName'] - -#security params -security_enabled = config['configurations']['cluster-env']['security_enabled'] - -ambari_server_resources_url = default("/ambariLevelParams/jdk_location", None) -if ambari_server_resources_url is not None and ambari_server_resources_url.endswith('/'): - ambari_server_resources_url = ambari_server_resources_url[:-1] - -# Unlimited key JCE policy params -jce_policy_zip = default("/ambariLevelParams/jce_name", None) # None when 
jdk is already installed by user -unlimited_key_jce_required = default("/componentLevelParams/unlimited_key_jce_required", False) -jdk_name = default("/ambariLevelParams/jdk_name", None) -java_home = default("/ambariLevelParams/java_home", None) -java_exec = "{0}/bin/java".format(java_home) if java_home is not None else "/bin/java" - -#users and groups -has_hadoop_env = 'hadoop-env' in config['configurations'] -mapred_user = config['configurations']['mapred-env']['mapred_user'] -hdfs_user = config['configurations']['hadoop-env']['hdfs_user'] -yarn_user = config['configurations']['yarn-env']['yarn_user'] - -user_group = config['configurations']['cluster-env']['user_group'] - -#hosts -hostname = config['agentLevelParams']['hostname'] -ambari_server_hostname = config['ambariLevelParams']['ambari_server_host'] -rm_host = default("/clusterHostInfo/resourcemanager_hosts", []) -slave_hosts = default("/clusterHostInfo/datanode_hosts", []) -oozie_servers = default("/clusterHostInfo/oozie_server", []) -hcat_server_hosts = default("/clusterHostInfo/webhcat_server_hosts", []) -hive_server_host = default("/clusterHostInfo/hive_server_hosts", []) -hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", []) -hs_host = default("/clusterHostInfo/historyserver_hosts", []) -jtnode_host = default("/clusterHostInfo/jtnode_hosts", []) -namenode_hosts = default("/clusterHostInfo/namenode_hosts", []) -hdfs_client_hosts = default("/clusterHostInfo/hdfs_client_hosts", []) -zk_hosts = default("/clusterHostInfo/zookeeper_server_hosts", []) -ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_hosts", []) -cluster_name = config["clusterName"] -set_instanceId = "false" -if 'cluster-env' in config['configurations'] and \ - 'metrics_collector_external_hosts' in config['configurations']['cluster-env']: - ams_collector_hosts = config['configurations']['cluster-env']['metrics_collector_external_hosts'] - set_instanceId = "true" -else: - ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", [])) - -has_namenode = len(namenode_hosts) > 0 -has_hdfs_clients = len(hdfs_client_hosts) > 0 -has_hdfs = has_hdfs_clients or has_namenode -has_resourcemanager = not len(rm_host) == 0 -has_slaves = not len(slave_hosts) == 0 -has_oozie_server = not len(oozie_servers) == 0 -has_hcat_server_host = not len(hcat_server_hosts) == 0 -has_hive_server_host = not len(hive_server_host) == 0 -has_hbase_masters = not len(hbase_master_hosts) == 0 -has_zk_host = not len(zk_hosts) == 0 -has_ganglia_server = not len(ganglia_server_hosts) == 0 -has_metric_collector = not len(ams_collector_hosts) == 0 - -is_namenode_master = hostname in namenode_hosts -is_jtnode_master = hostname in jtnode_host -is_rmnode_master = hostname in rm_host -is_hsnode_master = hostname in hs_host -is_hbase_master = hostname in hbase_master_hosts -is_slave = hostname in slave_hosts - -if has_ganglia_server: - ganglia_server_host = ganglia_server_hosts[0] - -metric_collector_port = None -if has_metric_collector: - if 'cluster-env' in config['configurations'] and \ - 'metrics_collector_external_port' in config['configurations']['cluster-env']: - metric_collector_port = config['configurations']['cluster-env']['metrics_collector_external_port'] - else: - metric_collector_web_address = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "0.0.0.0:6188") - if metric_collector_web_address.find(':') != -1: - metric_collector_port = metric_collector_web_address.split(':')[1] - else: - metric_collector_port = '6188' - 
if default("/configurations/ams-site/timeline.metrics.service.http.policy", "HTTP_ONLY") == "HTTPS_ONLY": - metric_collector_protocol = 'https' - else: - metric_collector_protocol = 'http' - metric_truststore_path= default("/configurations/ams-ssl-client/ssl.client.truststore.location", "") - metric_truststore_type= default("/configurations/ams-ssl-client/ssl.client.truststore.type", "") - metric_truststore_password= default("/configurations/ams-ssl-client/ssl.client.truststore.password", "") - metric_legacy_hadoop_sink = check_stack_feature(StackFeature.AMS_LEGACY_HADOOP_SINK, version_for_stack_feature_checks) - - pass - -metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60) -metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10) - -host_in_memory_aggregation = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation", True) -host_in_memory_aggregation_port = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation.port", 61888) -is_aggregation_https_enabled = False -if default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation.http.policy", "HTTP_ONLY") == "HTTPS_ONLY": - host_in_memory_aggregation_protocol = 'https' - is_aggregation_https_enabled = True -else: - host_in_memory_aggregation_protocol = 'http' - -# Cluster Zookeeper quorum -zookeeper_quorum = None -if has_zk_host: - if 'zoo.cfg' in config['configurations'] and 'clientPort' in config['configurations']['zoo.cfg']: - zookeeper_clientPort = config['configurations']['zoo.cfg']['clientPort'] - else: - zookeeper_clientPort = '2181' - zookeeper_quorum = (':' + zookeeper_clientPort + ',').join(config['clusterHostInfo']['zookeeper_server_hosts']) - # last port config - zookeeper_quorum += ':' + zookeeper_clientPort - -#hadoop params - -if has_namenode or dfs_type == 'HCFS': - hadoop_tmp_dir = format("/tmp/hadoop-{hdfs_user}") - hadoop_conf_dir = conf_select.get_hadoop_conf_dir() - task_log4j_properties_location = os.path.join(hadoop_conf_dir, "task-log4j.properties") - -hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix'] -hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix'] -hbase_tmp_dir = "/tmp/hbase-hbase" -#db params -oracle_driver_symlink_url = format("{ambari_server_resources_url}/oracle-jdbc-driver.jar") -mysql_driver_symlink_url = format("{ambari_server_resources_url}/mysql-jdbc-driver.jar") - -if has_namenode and 'rca_enabled' in config['configurations']['hadoop-env']: - rca_enabled = config['configurations']['hadoop-env']['rca_enabled'] -else: - rca_enabled = False -rca_disabled_prefix = "###" -if rca_enabled == True: - rca_prefix = "" -else: - rca_prefix = rca_disabled_prefix - -#hadoop-env.sh - -jsvc_path = "/usr/lib/bigtop-utils" - -hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize'] -namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize'] -namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize'] -namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize'] -namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m") -namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m") - -jtnode_opt_newsize = "200m" -jtnode_opt_maxnewsize = "200m" -jtnode_heapsize = "1024m" -ttnode_heapsize = "1024m" - 
-dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize'] -mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce") -mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce") - -#log4j.properties - -yarn_log_dir_prefix = default("/configurations/yarn-env/yarn_log_dir_prefix","/var/log/hadoop-yarn") - -dfs_hosts = default('/configurations/hdfs-site/dfs.hosts', None) - -# Hdfs log4j settings -hadoop_log_max_backup_size = default('configurations/hdfs-log4j/hadoop_log_max_backup_size', 256) -hadoop_log_number_of_backup_files = default('configurations/hdfs-log4j/hadoop_log_number_of_backup_files', 10) -hadoop_security_log_max_backup_size = default('configurations/hdfs-log4j/hadoop_security_log_max_backup_size', 256) -hadoop_security_log_number_of_backup_files = default('configurations/hdfs-log4j/hadoop_security_log_number_of_backup_files', 20) - -# Yarn log4j settings -yarn_rm_summary_log_max_backup_size = default('configurations/yarn-log4j/yarn_rm_summary_log_max_backup_size', 256) -yarn_rm_summary_log_number_of_backup_files = default('configurations/yarn-log4j/yarn_rm_summary_log_number_of_backup_files', 20) - -#log4j.properties -if (('hdfs-log4j' in config['configurations']) and ('content' in config['configurations']['hdfs-log4j'])): - log4j_props = config['configurations']['hdfs-log4j']['content'] - if (('yarn-log4j' in config['configurations']) and ('content' in config['configurations']['yarn-log4j'])): - log4j_props += config['configurations']['yarn-log4j']['content'] -else: - log4j_props = None - -refresh_topology = False -command_params = config["commandParams"] if "commandParams" in config else None -if command_params is not None: - refresh_topology = bool(command_params["refresh_topology"]) if "refresh_topology" in command_params else False - -ambari_java_home = default("/commandParams/ambari_java_home", None) -ambari_jdk_name = default("/commandParams/ambari_jdk_name", None) -ambari_jce_name = default("/commandParams/ambari_jce_name", None) - -ambari_libs_dir = "/var/lib/ambari-agent/lib" -is_webhdfs_enabled = config['configurations']['hdfs-site']['dfs.webhdfs.enabled'] -default_fs = config['configurations']['core-site']['fs.defaultFS'] - -#host info -all_hosts = default("/clusterHostInfo/all_hosts", []) -all_racks = default("/clusterHostInfo/all_racks", []) -all_ipv4_ips = default("/clusterHostInfo/all_ipv4_ips", []) -slave_hosts = default("/clusterHostInfo/datanode_hosts", []) - -#topology files -net_topology_script_file_path = "/etc/hadoop/conf/topology_script.py" -net_topology_script_dir = os.path.dirname(net_topology_script_file_path) -net_topology_mapping_data_file_name = 'topology_mappings.data' -net_topology_mapping_data_file_path = os.path.join(net_topology_script_dir, net_topology_mapping_data_file_name) - -#Added logic to create /tmp and /user directory for HCFS stack. 
-has_core_site = 'core-site' in config['configurations'] -hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab'] -kinit_path_local = get_kinit_path() -stack_version_unformatted = config['clusterLevelParams']['stack_version'] -stack_version_formatted = format_stack_version(stack_version_unformatted) -hadoop_bin_dir = stack_select.get_hadoop_dir("bin") -hdfs_principal_name = default('/configurations/hadoop-env/hdfs_principal_name', None) -hdfs_site = config['configurations']['hdfs-site'] -smoke_user = config['configurations']['cluster-env']['smokeuser'] -smoke_hdfs_user_dir = format("/user/{smoke_user}") -smoke_hdfs_user_mode = 0770 - - -##### Namenode RPC ports - metrics config section start ##### - -# Figure out the rpc ports for current namenode -nn_rpc_client_port = None -nn_rpc_dn_port = None -nn_rpc_healthcheck_port = None - -namenode_id = None -namenode_rpc = None - -dfs_ha_enabled = False -dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.internal.nameservices', None) -if dfs_ha_nameservices is None: - dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.nameservices', None) -dfs_ha_namenode_ids = default(format("/configurations/hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None) - -dfs_ha_namemodes_ids_list = [] -other_namenode_id = None - -if dfs_ha_namenode_ids: - dfs_ha_namemodes_ids_list = dfs_ha_namenode_ids.split(",") - dfs_ha_namenode_ids_array_len = len(dfs_ha_namemodes_ids_list) - if dfs_ha_namenode_ids_array_len > 1: - dfs_ha_enabled = True - -if dfs_ha_enabled: - for nn_id in dfs_ha_namemodes_ids_list: - nn_host = config['configurations']['hdfs-site'][format('dfs.namenode.rpc-address.{dfs_ha_nameservices}.{nn_id}')] - if hostname.lower() in nn_host.lower(): - namenode_id = nn_id - namenode_rpc = nn_host - pass - pass -else: - namenode_rpc = default('/configurations/hdfs-site/dfs.namenode.rpc-address', default_fs) - -# if HDFS is not installed in the cluster, then don't try to access namenode_rpc -if has_namenode and namenode_rpc and "core-site" in config['configurations']: - port_str = namenode_rpc.split(':')[-1].strip() - try: - nn_rpc_client_port = int(port_str) - except ValueError: - nn_rpc_client_port = None - -if dfs_ha_enabled: - dfs_service_rpc_address = default(format('/configurations/hdfs-site/dfs.namenode.servicerpc-address.{dfs_ha_nameservices}.{namenode_id}'), None) - dfs_lifeline_rpc_address = default(format('/configurations/hdfs-site/dfs.namenode.lifeline.rpc-address.{dfs_ha_nameservices}.{namenode_id}'), None) -else: - dfs_service_rpc_address = default('/configurations/hdfs-site/dfs.namenode.servicerpc-address', None) - dfs_lifeline_rpc_address = default(format('/configurations/hdfs-site/dfs.namenode.lifeline.rpc-address'), None) - -if dfs_service_rpc_address: - nn_rpc_dn_port = dfs_service_rpc_address.split(':')[1].strip() - -if dfs_lifeline_rpc_address: - nn_rpc_healthcheck_port = dfs_lifeline_rpc_address.split(':')[1].strip() - -is_nn_client_port_configured = False if nn_rpc_client_port is None else True -is_nn_dn_port_configured = False if nn_rpc_dn_port is None else True -is_nn_healthcheck_port_configured = False if nn_rpc_healthcheck_port is None else True - -##### end ##### - -import functools -#create partial functions with common arguments for every HdfsResource call -#to create/delete/copyfromlocal hdfs directories/files we need to call params.HdfsResource in code -HdfsResource = functools.partial( - HdfsResource, - user=hdfs_user, - hdfs_resource_ignore_file = 
"/var/lib/ambari-agent/data/.hdfs_resource_ignore", - security_enabled = security_enabled, - keytab = hdfs_user_keytab, - kinit_path_local = kinit_path_local, - hadoop_bin_dir = hadoop_bin_dir, - hadoop_conf_dir = hadoop_conf_dir, - principal_name = hdfs_principal_name, - hdfs_site = hdfs_site, - default_fs = default_fs, - immutable_paths = get_not_managed_resources(), - dfs_type = dfs_type -) diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-START/scripts/rack_awareness.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-START/scripts/rack_awareness.py deleted file mode 100644 index 48158bb820..0000000000 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-START/scripts/rack_awareness.py +++ /dev/null @@ -1,48 +0,0 @@ -#!/usr/bin/env python - -""" -Licensed to the Apache Software Foundation (ASF) under one -or more contributor license agreements. See the NOTICE file -distributed with this work for additional information -regarding copyright ownership. The ASF licenses this file -to you under the Apache License, Version 2.0 (the -"License"); you may not use this file except in compliance -with the License. You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -""" - -from resource_management.core.resources import File -from resource_management.core.source import StaticFile, Template -from resource_management.libraries.functions import format - - -def create_topology_mapping(): - import params - - File(params.net_topology_mapping_data_file_path, - content=Template("topology_mappings.data.j2"), - owner=params.hdfs_user, - group=params.user_group, - mode=0644, - only_if=format("test -d {net_topology_script_dir}")) - -def create_topology_script(): - import params - - File(params.net_topology_script_file_path, - content=StaticFile('topology_script.py'), - mode=0755, - only_if=format("test -d {net_topology_script_dir}")) - -def create_topology_script_and_mapping(): - import params - if params.has_hadoop_env: - create_topology_mapping() - create_topology_script() diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-START/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-START/scripts/shared_initialization.py deleted file mode 100644 index ce6b869f88..0000000000 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-START/scripts/shared_initialization.py +++ /dev/null @@ -1,262 +0,0 @@ -""" -Licensed to the Apache Software Foundation (ASF) under one -or more contributor license agreements. See the NOTICE file -distributed with this work for additional information -regarding copyright ownership. The ASF licenses this file -to you under the Apache License, Version 2.0 (the -"License"); you may not use this file except in compliance -with the License. You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. - -""" - -import os -from resource_management.libraries.providers.hdfs_resource import WebHDFSUtil -from resource_management.core.resources.jcepolicyinfo import JcePolicyInfo - -from resource_management import * - -def setup_hadoop(): - """ - Setup hadoop files and directories - """ - import params - - Execute(("setenforce","0"), - only_if="test -f /selinux/enforce", - not_if="(! which getenforce ) || (which getenforce && getenforce | grep -q Disabled)", - sudo=True, - ) - - #directories - if params.has_namenode or params.dfs_type == 'HCFS': - Directory(params.hdfs_log_dir_prefix, - create_parents = True, - owner='root', - group=params.user_group, - mode=0775, - cd_access='a', - ) - if params.has_namenode: - Directory(params.hadoop_pid_dir_prefix, - create_parents = True, - owner='root', - group='root', - cd_access='a', - ) - Directory(format("{hadoop_pid_dir_prefix}/{hdfs_user}"), - owner=params.hdfs_user, - cd_access='a', - ) - - Directory(params.hadoop_tmp_dir, - create_parents = True, - owner=params.hdfs_user, - cd_access='a', - ) - #files - if params.security_enabled: - tc_owner = "root" - else: - tc_owner = params.hdfs_user - - if os.path.exists(params.hadoop_conf_dir): - File(os.path.join(params.hadoop_conf_dir, 'commons-logging.properties'), - owner=tc_owner, - content=Template('commons-logging.properties.j2') - ) - - health_check_template_name = "health_check" - File(os.path.join(params.hadoop_conf_dir, health_check_template_name), - owner=tc_owner, - content=Template(health_check_template_name + ".j2") - ) - - log4j_filename = os.path.join(params.hadoop_conf_dir, "log4j.properties") - if (params.log4j_props != None): - File(log4j_filename, - mode=0644, - group=params.user_group, - owner=params.hdfs_user, - content=InlineTemplate(params.log4j_props) - ) - elif (os.path.exists(format("{params.hadoop_conf_dir}/log4j.properties"))): - File(log4j_filename, - mode=0644, - group=params.user_group, - owner=params.hdfs_user, - ) - - create_microsoft_r_dir() - - if params.has_hdfs or params.dfs_type == 'HCFS': - # if WebHDFS is not enabled we need this jar to create hadoop folders and copy tarballs to HDFS. 
- if params.sysprep_skip_copy_fast_jar_hdfs: - print "Skipping copying of fast-hdfs-resource.jar as host is sys prepped" - elif params.dfs_type == 'HCFS' or not WebHDFSUtil.is_webhdfs_available(params.is_webhdfs_enabled, params.dfs_type): - # for source-code of jar goto contrib/fast-hdfs-resource - File(format("{ambari_libs_dir}/fast-hdfs-resource.jar"), - mode=0644, - content=StaticFile("fast-hdfs-resource.jar") - ) - if os.path.exists(params.hadoop_conf_dir): - if params.hadoop_metrics2_properties_content: - File(os.path.join(params.hadoop_conf_dir, "hadoop-metrics2.properties"), - owner=params.hdfs_user, - group=params.user_group, - content=InlineTemplate(params.hadoop_metrics2_properties_content) - ) - else: - File(os.path.join(params.hadoop_conf_dir, "hadoop-metrics2.properties"), - owner=params.hdfs_user, - group=params.user_group, - content=Template("hadoop-metrics2.properties.j2") - ) - - if params.dfs_type == 'HCFS' and params.has_core_site and 'ECS_CLIENT' in params.component_list: - create_dirs() - - -def setup_configs(): - """ - Creates configs for services HDFS mapred - """ - import params - - if params.has_namenode or params.dfs_type == 'HCFS': - if os.path.exists(params.hadoop_conf_dir): - File(params.task_log4j_properties_location, - content=StaticFile("task-log4j.properties"), - mode=0755 - ) - - if os.path.exists(os.path.join(params.hadoop_conf_dir, 'configuration.xsl')): - File(os.path.join(params.hadoop_conf_dir, 'configuration.xsl'), - owner=params.hdfs_user, - group=params.user_group - ) - if os.path.exists(os.path.join(params.hadoop_conf_dir, 'masters')): - File(os.path.join(params.hadoop_conf_dir, 'masters'), - owner=params.hdfs_user, - group=params.user_group - ) - -def create_javahome_symlink(): - if os.path.exists("/usr/jdk/jdk1.6.0_31") and not os.path.exists("/usr/jdk64/jdk1.6.0_31"): - Directory("/usr/jdk64/", - create_parents = True, - ) - Link("/usr/jdk/jdk1.6.0_31", - to="/usr/jdk64/jdk1.6.0_31", - ) - -def create_dirs(): - import params - params.HdfsResource(params.hdfs_tmp_dir, - type="directory", - action="create_on_execute", - owner=params.hdfs_user, - mode=0777 - ) - params.HdfsResource(params.smoke_hdfs_user_dir, - type="directory", - action="create_on_execute", - owner=params.smoke_user, - mode=params.smoke_hdfs_user_mode - ) - params.HdfsResource(None, - action="execute" - ) - -def create_microsoft_r_dir(): - import params - if 'MICROSOFT_R_NODE_CLIENT' in params.component_list and params.default_fs: - directory = '/user/RevoShare' - try: - params.HdfsResource(directory, - type="directory", - action="create_on_execute", - owner=params.hdfs_user, - mode=0777) - params.HdfsResource(None, action="execute") - except Exception as exception: - Logger.warning("Could not check the existence of {0} on DFS while starting {1}, exception: {2}".format(directory, params.current_service, str(exception))) - -def setup_unlimited_key_jce_policy(): - """ - Sets up the unlimited key JCE policy if needed. 
(sets up ambari JCE as well if ambari and the stack use different JDK) - """ - import params - __setup_unlimited_key_jce_policy(custom_java_home=params.java_home, custom_jdk_name=params.jdk_name, custom_jce_name = params.jce_policy_zip) - if params.ambari_jce_name and params.ambari_jce_name != params.jce_policy_zip: - __setup_unlimited_key_jce_policy(custom_java_home=params.ambari_java_home, custom_jdk_name=params.ambari_jdk_name, custom_jce_name = params.ambari_jce_name) - -def __setup_unlimited_key_jce_policy(custom_java_home, custom_jdk_name, custom_jce_name): - """ - Sets up the unlimited key JCE policy if needed. - - The following criteria must be met: - - * The cluster has not been previously prepared (sys preped) - cluster-env/sysprep_skip_setup_jce = False - * Ambari is managing the host's JVM - /ambariLevelParams/jdk_name is set - * Either security is enabled OR a service requires it - /componentLevelParams/unlimited_key_jce_required = True - * The unlimited key JCE policy has not already been installed - - If the conditions are met, the following steps are taken to install the unlimited key JCE policy JARs - - 1. The unlimited key JCE policy ZIP file is downloaded from the Ambari server and stored in the - Ambari agent's temporary directory - 2. The existing JCE policy JAR files are deleted - 3. The downloaded ZIP file is unzipped into the proper JCE policy directory - - :return: None - """ - import params - - if params.sysprep_skip_setup_jce: - Logger.info("Skipping unlimited key JCE policy check and setup since the host is sys prepped") - - elif not custom_jdk_name: - Logger.info("Skipping unlimited key JCE policy check and setup since the Java VM is not managed by Ambari") - - elif not params.unlimited_key_jce_required: - Logger.info("Skipping unlimited key JCE policy check and setup since it is not required") - - else: - jcePolicyInfo = JcePolicyInfo(custom_java_home) - - if jcePolicyInfo.is_unlimited_key_jce_policy(): - Logger.info("The unlimited key JCE policy is required, and appears to have been installed.") - - elif custom_jce_name is None: - raise Fail("The unlimited key JCE policy needs to be installed; however the JCE policy zip is not specified.") - - else: - Logger.info("The unlimited key JCE policy is required, and needs to be installed.") - - jce_zip_target = format("{artifact_dir}/{custom_jce_name}") - jce_zip_source = format("{ambari_server_resources_url}/{custom_jce_name}") - java_security_dir = format("{custom_java_home}/jre/lib/security") - - Logger.debug("Downloading the unlimited key JCE policy files from {0} to {1}.".format(jce_zip_source, jce_zip_target)) - Directory(params.artifact_dir, create_parents=True) - File(jce_zip_target, content=DownloadSource(jce_zip_source)) - - Logger.debug("Removing existing JCE policy JAR files: {0}.".format(java_security_dir)) - File(format("{java_security_dir}/US_export_policy.jar"), action="delete") - File(format("{java_security_dir}/local_policy.jar"), action="delete") - - Logger.debug("Unzipping the unlimited key JCE policy files from {0} into {1}.".format(jce_zip_target, java_security_dir)) - extract_cmd = ("unzip", "-o", "-j", "-q", jce_zip_target, "-d", java_security_dir) - Execute(extract_cmd, - only_if=format("test -e {java_security_dir} && test -f {jce_zip_target}"), - path=['/bin/', '/usr/bin'], - sudo=True - ) diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-START/templates/commons-logging.properties.j2 
b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-START/templates/commons-logging.properties.j2 deleted file mode 100644 index 2197ba5c17..0000000000 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-START/templates/commons-logging.properties.j2 +++ /dev/null @@ -1,43 +0,0 @@ -{# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -#} - -#/* -# * Licensed to the Apache Software Foundation (ASF) under one -# * or more contributor license agreements. See the NOTICE file -# * distributed with this work for additional information -# * regarding copyright ownership. The ASF licenses this file -# * to you under the Apache License, Version 2.0 (the -# * "License"); you may not use this file except in compliance -# * with the License. You may obtain a copy of the License at -# * -# * http://www.apache.org/licenses/LICENSE-2.0 -# * -# * Unless required by applicable law or agreed to in writing, software -# * distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. -# */ - -#Logging Implementation - -#Log4J -org.apache.commons.logging.Log=org.apache.commons.logging.impl.Log4JLogger - -#JDK Logger -#org.apache.commons.logging.Log=org.apache.commons.logging.impl.Jdk14Logger diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-START/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-START/templates/exclude_hosts_list.j2 deleted file mode 100644 index 1adba80cbd..0000000000 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-START/templates/exclude_hosts_list.j2 +++ /dev/null @@ -1,21 +0,0 @@ -{# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-#} - -{% for host in hdfs_exclude_file %} -{{host}} -{% endfor %} diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-START/templates/hadoop-metrics2.properties.j2 b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-START/templates/hadoop-metrics2.properties.j2 deleted file mode 100644 index 49be9c4aba..0000000000 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-START/templates/hadoop-metrics2.properties.j2 +++ /dev/null @@ -1,114 +0,0 @@ -{# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -#} - -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -# syntax: [prefix].[source|sink|jmx].[instance].[options] -# See package.html for org.apache.hadoop.metrics2 for details - -{% if has_ganglia_server %} -*.period=60 - -*.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31 -*.sink.ganglia.period=10 - -# default for supportsparse is false -*.sink.ganglia.supportsparse=true - -.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both -.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40 - -# Hook up to the server -namenode.sink.ganglia.servers={{ganglia_server_host}}:8661 -datanode.sink.ganglia.servers={{ganglia_server_host}}:8659 -jobtracker.sink.ganglia.servers={{ganglia_server_host}}:8662 -tasktracker.sink.ganglia.servers={{ganglia_server_host}}:8658 -maptask.sink.ganglia.servers={{ganglia_server_host}}:8660 -reducetask.sink.ganglia.servers={{ganglia_server_host}}:8660 -resourcemanager.sink.ganglia.servers={{ganglia_server_host}}:8664 -nodemanager.sink.ganglia.servers={{ganglia_server_host}}:8657 -historyserver.sink.ganglia.servers={{ganglia_server_host}}:8666 -journalnode.sink.ganglia.servers={{ganglia_server_host}}:8654 -nimbus.sink.ganglia.servers={{ganglia_server_host}}:8649 -supervisor.sink.ganglia.servers={{ganglia_server_host}}:8650 - -resourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue - -{% endif %} - -{% if has_metric_collector %} - -*.period={{metrics_collection_period}} -{% if metric_legacy_hadoop_sink %} -*.sink.timeline.plugin.urls=file:///usr/lib/ambari-metrics-hadoop-sink/ambari-metrics-hadoop-sink-legacy.jar -{% else %} -*.sink.timeline.plugin.urls=file:///usr/lib/ambari-metrics-hadoop-sink/ambari-metrics-hadoop-sink.jar -{% endif %} -*.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink -*.sink.timeline.period={{metrics_collection_period}} -*.sink.timeline.sendInterval={{metrics_report_interval}}000 -*.sink.timeline.slave.host.name={{hostname}} -*.sink.timeline.zookeeper.quorum={{zookeeper_quorum}} -*.sink.timeline.protocol={{metric_collector_protocol}} -*.sink.timeline.port={{metric_collector_port}} -*.sink.timeline.host_in_memory_aggregation = {{host_in_memory_aggregation}} -*.sink.timeline.host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}} -{% if is_aggregation_https_enabled %} -*.sink.timeline.host_in_memory_aggregation_protocol = {{host_in_memory_aggregation_protocol}} -{% endif %} - -# HTTPS properties -*.sink.timeline.truststore.path = {{metric_truststore_path}} -*.sink.timeline.truststore.type = {{metric_truststore_type}} -*.sink.timeline.truststore.password = {{metric_truststore_password}} - -datanode.sink.timeline.collector.hosts={{ams_collector_hosts}} -namenode.sink.timeline.collector.hosts={{ams_collector_hosts}} -resourcemanager.sink.timeline.collector.hosts={{ams_collector_hosts}} -nodemanager.sink.timeline.collector.hosts={{ams_collector_hosts}} -jobhistoryserver.sink.timeline.collector.hosts={{ams_collector_hosts}} -journalnode.sink.timeline.collector.hosts={{ams_collector_hosts}} -applicationhistoryserver.sink.timeline.collector.hosts={{ams_collector_hosts}} - -resourcemanager.sink.timeline.tagsForPrefix.yarn=Queue - -{% if is_nn_client_port_configured %} -# Namenode rpc ports customization -namenode.sink.timeline.metric.rpc.client.port={{nn_rpc_client_port}} -{% endif %} -{% if is_nn_dn_port_configured %} -namenode.sink.timeline.metric.rpc.datanode.port={{nn_rpc_dn_port}} -{% endif %} -{% if is_nn_healthcheck_port_configured %} 
-namenode.sink.timeline.metric.rpc.healthcheck.port={{nn_rpc_healthcheck_port}} -{% endif %} - -{% endif %} diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-START/templates/health_check.j2 b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-START/templates/health_check.j2 deleted file mode 100644 index 0a03d17818..0000000000 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-START/templates/health_check.j2 +++ /dev/null @@ -1,81 +0,0 @@ -{# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -#} - -#!/bin/bash -# -#/* -# * Licensed to the Apache Software Foundation (ASF) under one -# * or more contributor license agreements. See the NOTICE file -# * distributed with this work for additional information -# * regarding copyright ownership. The ASF licenses this file -# * to you under the Apache License, Version 2.0 (the -# * "License"); you may not use this file except in compliance -# * with the License. You may obtain a copy of the License at -# * -# * http://www.apache.org/licenses/LICENSE-2.0 -# * -# * Unless required by applicable law or agreed to in writing, software -# * distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. -# */ - -err=0; - -function check_disks { - - for m in `awk '$3~/ext3/ {printf" %s ",$2}' /etc/fstab` ; do - fsdev="" - fsdev=`awk -v m=$m '$2==m {print $1}' /proc/mounts`; - if [ -z "$fsdev" -a "$m" != "/mnt" ] ; then - msg_="$msg_ $m(u)" - else - msg_="$msg_`awk -v m=$m '$2==m { if ( $4 ~ /^ro,/ ) {printf"%s(ro)",$2 } ; }' /proc/mounts`" - fi - done - - if [ -z "$msg_" ] ; then - echo "disks ok" ; exit 0 - else - echo "$msg_" ; exit 2 - fi - -} - -# Run all checks -for check in disks ; do - msg=`check_${check}` ; - if [ $? -eq 0 ] ; then - ok_msg="$ok_msg$msg," - else - err_msg="$err_msg$msg," - fi -done - -if [ ! -z "$err_msg" ] ; then - echo -n "ERROR $err_msg " -fi -if [ ! -z "$ok_msg" ] ; then - echo -n "OK: $ok_msg" -fi - -echo - -# Success! -exit 0 diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-START/templates/include_hosts_list.j2 b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-START/templates/include_hosts_list.j2 deleted file mode 100644 index 4a9e71386a..0000000000 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-START/templates/include_hosts_list.j2 +++ /dev/null @@ -1,21 +0,0 @@ -{# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements.
See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -#} - -{% for host in slave_hosts %} -{{host}} -{% endfor %} diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-START/templates/topology_mappings.data.j2 b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-START/templates/topology_mappings.data.j2 deleted file mode 100644 index 15034d6306..0000000000 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/hooks/before-START/templates/topology_mappings.data.j2 +++ /dev/null @@ -1,24 +0,0 @@ -{# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 - # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -#} -[network_topology] -{% for host in all_hosts %} -{% if host in slave_hosts %} -{{host}}={{all_racks[loop.index-1]}} -{{all_ipv4_ips[loop.index-1]}}={{all_racks[loop.index-1]}} -{% endif %} -{% endfor %}