This is an automated email from the ASF dual-hosted git repository. wuzhiguo pushed a commit to branch trunk in repository https://gitbox.apache.org/repos/asf/ambari.git
The following commit(s) were added to refs/heads/trunk by this push: new 721a000402 AMBARI-25749: Introduce bigtop-select to BIGTOP stack (#3381) 721a000402 is described below commit 721a000402a26af41cbf0f636a2ae14851d79ca4 Author: timyuer <524860...@qq.com> AuthorDate: Sat Oct 15 16:25:53 2022 +0800 AMBARI-25749: Introduce bigtop-select to BIGTOP stack (#3381) --- .../libraries/functions/conf_select.py | 2 +- .../libraries/functions/stack_select.py | 2 +- .../libraries/functions/version.py | 2 - .../scripts/shared_initialization.py | 6 +- .../BIGTOP/3.2.0/configuration/cluster-env.xml | 15 + .../BIGTOP/3.2.0/properties/stack_features.json | 117 ++--- .../BIGTOP/3.2.0/properties/stack_packages.json | 530 +++++++++++++++++++++ .../BIGTOP/3.2.0/properties/stack_tools.json | 8 +- .../services/KAFKA/package/scripts/kafka_broker.py | 2 +- .../3.2.0/services/KAFKA/package/scripts/params.py | 9 +- .../ZOOKEEPER/package/scripts/params_linux.py | 1 - .../ZOOKEEPER/package/scripts/status_params.py | 3 - ambari-server/src/test/python/TestVersion.py | 10 +- 13 files changed, 624 insertions(+), 83 deletions(-) diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py b/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py index 500bbf7f27..4474606432 100644 --- a/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py +++ b/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py @@ -44,7 +44,7 @@ from resource_management.libraries.functions import StackFeature def _get_cmd(command, package, version): conf_selector_path = stack_tools.get_stack_tool_path(stack_tools.CONF_SELECTOR_NAME) - return ('ambari-python-wrap', conf_selector_path, command, '--package', package, '--stack-version', version, '--conf-version', '0') + return ('ambari-python-wrap', conf_selector_path, command, '--package', package, '--stack-version', version) def _valid(stack_name, 
package, ver): return (ver and check_stack_feature(StackFeature.CONFIG_VERSIONING, ver)) diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/stack_select.py b/ambari-common/src/main/python/resource_management/libraries/functions/stack_select.py index d5e86d343f..1aab6e3fba 100644 --- a/ambari-common/src/main/python/resource_management/libraries/functions/stack_select.py +++ b/ambari-common/src/main/python/resource_management/libraries/functions/stack_select.py @@ -479,7 +479,7 @@ def get_stack_version_before_install(component_name): component_dir = HADOOP_HOME_DIR_TEMPLATE.format(stack_root, "current", component_name) stack_selector_name = stack_tools.get_stack_tool_name(stack_tools.STACK_SELECTOR_NAME) if os.path.islink(component_dir): - stack_version = os.path.basename(os.path.dirname(os.readlink(component_dir))) + stack_version = os.path.basename(os.path.dirname(os.path.dirname(os.path.dirname(os.readlink(component_dir))))) match = re.match('[0-9]+.[0-9]+.[0-9]+.[0-9]+-[0-9]+', stack_version) if match is None: Logger.info('Failed to get extracted version with {0} in method get_stack_version_before_install'.format(stack_selector_name)) diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/version.py b/ambari-common/src/main/python/resource_management/libraries/functions/version.py index 0c34de10ad..7d6661fd5c 100644 --- a/ambari-common/src/main/python/resource_management/libraries/functions/version.py +++ b/ambari-common/src/main/python/resource_management/libraries/functions/version.py @@ -60,8 +60,6 @@ def format_stack_version(value): if strip_dots.isdigit(): normalized = _normalize(str(value)) if len(normalized) == 2: - normalized = normalized + [0, 0] - elif len(normalized) == 3: normalized = normalized + [0, ] normalized = [str(x) for x in normalized] # need to convert each number into a string return ".".join(normalized) diff --git 
a/ambari-server/src/main/resources/stack-hooks/before-INSTALL/scripts/shared_initialization.py b/ambari-server/src/main/resources/stack-hooks/before-INSTALL/scripts/shared_initialization.py index f4a5e15c2e..6de7300d13 100644 --- a/ambari-server/src/main/resources/stack-hooks/before-INSTALL/scripts/shared_initialization.py +++ b/ambari-server/src/main/resources/stack-hooks/before-INSTALL/scripts/shared_initialization.py @@ -29,9 +29,9 @@ def install_packages(): return packages = ['unzip', 'curl'] - # if params.stack_version_formatted != "" and compare_versions(params.stack_version_formatted, '2.2') >= 0: - # stack_selector_package = stack_tools.get_stack_tool_package(stack_tools.STACK_SELECTOR_NAME) - # packages.append(stack_selector_package) + if params.stack_version_formatted != "" and compare_versions(params.stack_version_formatted, '3.2.0') >= 0: + stack_selector_package = stack_tools.get_stack_tool_package(stack_tools.STACK_SELECTOR_NAME) + packages.append(stack_selector_package) Package(packages, retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability, retry_count=params.agent_stack_retry_count) diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/configuration/cluster-env.xml b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/configuration/cluster-env.xml index ad0a5fa818..e237f2f648 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/configuration/cluster-env.xml +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/configuration/cluster-env.xml @@ -195,6 +195,21 @@ gpgcheck=0</value> <visible>false</visible> </value-attributes> <on-ambari-upgrade add="true"/> + </property> + <!-- Define stack_packages property in the base stack. 
DO NOT override this property for each stack version --> + <property> + <name>stack_packages</name> + <value/> + <description>Associations between component and stack-select tools.</description> + <property-type>VALUE_FROM_PROPERTY_FILE</property-type> + <value-attributes> + <property-file-name>stack_packages.json</property-file-name> + <property-file-type>json</property-file-type> + <read-only>true</read-only> + <overridable>false</overridable> + <visible>false</visible> + </value-attributes> + <on-ambari-upgrade add="true"/> </property> <!-- Define stack_features property in the base stack. DO NOT override this property for each stack version --> <property> diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/properties/stack_features.json b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/properties/stack_features.json index c8c2c082cf..2d8b7669d8 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/properties/stack_features.json +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/properties/stack_features.json @@ -1,58 +1,61 @@ { -"BIGTOP": { - "stack_features": [ - { - "name": "snappy", - "description": "Snappy compressor/decompressor support", - "min_version": "2.0.0.0", - "max_version": "2.2.0.0" - }, - { - "name": "lzo", - "description": "LZO libraries support", - "min_version": "2.2.1.0" - }, - { - "name": "copy_tarball_to_hdfs", - "description": "Copy tarball to HDFS support (AMBARI-12113)", - "min_version": "2.2.0.0" - }, - { - "name": "hive_metastore_upgrade_schema", - "description": "Hive metastore upgrade schema support (AMBARI-11176)", - "min_version": "2.3.0.0" - }, - { - "name": "hive_webhcat_specific_configs", - "description": "Hive webhcat specific configurations support (AMBARI-12364)", - "min_version": "2.3.0.0" - }, - { - "name": "hive_purge_table", - "description": "Hive purge table support (AMBARI-12260)", - "min_version": "2.3.0.0" - }, - { - "name": "hive_server2_kerberized_env", - "description": "Hive 
server2 working on kerberized environment (AMBARI-13749)", - "min_version": "2.2.3.0", - "max_version": "2.2.5.0" - }, - { - "name": "hive_env_heapsize", - "description": "Hive heapsize property defined in hive-env (AMBARI-12801)", - "min_version": "2.2.0.0" - }, - { - "name": "hive_metastore_site_support", - "description": "Hive Metastore site support", - "min_version": "2.5.0.0" - }, - { - "name": "kafka_kerberos", - "description": "Kafka Kerberos support (AMBARI-10984)", - "min_version": "1.0.0.0" - } - ] -} -} + "BIGTOP": { + "stack_features": [ + { + "name": "snappy", + "description": "Snappy compressor/decompressor support", + "min_version": "3.2.0" + }, + { + "name": "lzo", + "description": "LZO libraries support", + "min_version": "3.2.0" + }, + { + "name": "copy_tarball_to_hdfs", + "description": "Copy tarball to HDFS support (AMBARI-12113)", + "min_version": "3.2.0" + }, + { + "name": "hive_metastore_upgrade_schema", + "description": "Hive metastore upgrade schema support (AMBARI-11176)", + "min_version": "3.2.0" + }, + { + "name": "hive_webhcat_specific_configs", + "description": "Hive webhcat specific configurations support (AMBARI-12364)", + "min_version": "3.2.0" + }, + { + "name": "hive_purge_table", + "description": "Hive purge table support (AMBARI-12260)", + "min_version": "3.2.0" + }, + { + "name": "hive_server2_kerberized_env", + "description": "Hive server2 working on kerberized environment (AMBARI-13749)", + "min_version": "3.2.0" + }, + { + "name": "hive_env_heapsize", + "description": "Hive heapsize property defined in hive-env (AMBARI-12801)", + "min_version": "3.2.0" + }, + { + "name": "hive_metastore_site_support", + "description": "Hive Metastore site support", + "min_version": "3.2.0" + }, + { + "name": "kafka_kerberos", + "description": "Kafka Kerberos support (AMBARI-10984)", + "min_version": "3.2.0" + }, + { + "name": "rolling_upgrade", + "description": "Rolling upgrade support", + "min_version": "3.2.0" + } + ] + } +} \ No newline 
at end of file diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/properties/stack_packages.json b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/properties/stack_packages.json new file mode 100644 index 0000000000..1e1bd47a3e --- /dev/null +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/properties/stack_packages.json @@ -0,0 +1,530 @@ +{ + "BIGTOP": { + "stack-select": { + "HBASE": { + "HBASE_CLIENT": { + "STACK-SELECT-PACKAGE": "hbase-client", + "INSTALL": [ + "hbase-client" + ], + "PATCH": [ + "hbase-client" + ], + "STANDARD": [ + "hbase-client", + "phoenix-client", + "hadoop-client" + ] + }, + "HBASE_MASTER": { + "STACK-SELECT-PACKAGE": "hbase-master", + "INSTALL": [ + "hbase-master" + ], + "PATCH": [ + "hbase-master" + ], + "STANDARD": [ + "hbase-master" + ] + }, + "HBASE_REGIONSERVER": { + "STACK-SELECT-PACKAGE": "hbase-regionserver", + "INSTALL": [ + "hbase-regionserver" + ], + "PATCH": [ + "hbase-regionserver" + ], + "STANDARD": [ + "hbase-regionserver" + ] + } + }, + "HDFS": { + "DATANODE": { + "STACK-SELECT-PACKAGE": "hadoop-hdfs-datanode", + "INSTALL": [ + "hadoop-hdfs-datanode" + ], + "PATCH": [ + "hadoop-hdfs-datanode" + ], + "STANDARD": [ + "hadoop-hdfs-datanode" + ] + }, + "HDFS_CLIENT": { + "STACK-SELECT-PACKAGE": "hadoop-hdfs-client", + "INSTALL": [ + "hadoop-hdfs-client" + ], + "PATCH": [ + "hadoop-hdfs-client" + ], + "STANDARD": [ + "hadoop-client" + ] + }, + "NAMENODE": { + "STACK-SELECT-PACKAGE": "hadoop-hdfs-namenode", + "INSTALL": [ + "hadoop-hdfs-namenode" + ], + "PATCH": [ + "hadoop-hdfs-namenode" + ], + "STANDARD": [ + "hadoop-hdfs-namenode" + ] + }, + "JOURNALNODE": { + "STACK-SELECT-PACKAGE": "hadoop-hdfs-journalnode", + "INSTALL": [ + "hadoop-hdfs-journalnode" + ], + "PATCH": [ + "hadoop-hdfs-journalnode" + ], + "STANDARD": [ + "hadoop-hdfs-journalnode" + ] + }, + "SECONDARY_NAMENODE": { + "STACK-SELECT-PACKAGE": "hadoop-hdfs-secondarynamenode", + "INSTALL": [ + "hadoop-hdfs-secondarynamenode" + ], + 
"PATCH": [ + "hadoop-hdfs-secondarynamenode" + ], + "STANDARD": [ + "hadoop-hdfs-secondarynamenode" + ] + }, + "ZKFC": { + "STACK-SELECT-PACKAGE": "hadoop-hdfs-zkfc", + "INSTALL": [ + "hadoop-hdfs-zkfc" + ], + "PATCH": [ + "hadoop-hdfs-zkfc" + ], + "STANDARD": [ + "hadoop-hdfs-zkfc" + ] + } + }, + "HIVE": { + "HIVE_METASTORE": { + "STACK-SELECT-PACKAGE": "hive-metastore", + "INSTALL": [ + "hive-metastore" + ], + "PATCH": [ + "hive-metastore" + ], + "STANDARD": [ + "hive-metastore" + ] + }, + "HIVE_SERVER": { + "STACK-SELECT-PACKAGE": "hive-server2", + "INSTALL": [ + "hive-server2" + ], + "PATCH": [ + "hive-server2" + ], + "STANDARD": [ + "hive-server2" + ] + }, + "HIVE_CLIENT": { + "STACK-SELECT-PACKAGE": "hive-client", + "INSTALL": [ + "hive-client" + ], + "PATCH": [ + "hive-client" + ], + "STANDARD": [ + "hadoop-client" + ] + } + }, + "KAFKA": { + "KAFKA_BROKER": { + "STACK-SELECT-PACKAGE": "kafka-broker", + "INSTALL": [ + "kafka-broker" + ], + "PATCH": [ + "kafka-broker" + ], + "STANDARD": [ + "kafka-broker" + ] + } + }, + "MAPREDUCE2": { + "HISTORYSERVER": { + "STACK-SELECT-PACKAGE": "hadoop-mapreduce-historyserver", + "INSTALL": [ + "hadoop-mapreduce-historyserver" + ], + "PATCH": [ + "hadoop-mapreduce-historyserver" + ], + "STANDARD": [ + "hadoop-mapreduce-historyserver" + ] + }, + "MAPREDUCE2_CLIENT": { + "STACK-SELECT-PACKAGE": "hadoop-mapreduce-client", + "INSTALL": [ + "hadoop-mapreduce-client" + ], + "PATCH": [ + "hadoop-mapreduce-client" + ], + "STANDARD": [ + "hadoop-client" + ] + } + }, + "SPARK": { + "SPARK_CLIENT": { + "STACK-SELECT-PACKAGE": "spark-client", + "INSTALL": [ + "spark-client" + ], + "PATCH": [ + "spark-client" + ], + "STANDARD": [ + "spark-client" + ] + }, + "SPARK_JOBHISTORYSERVER": { + "STACK-SELECT-PACKAGE": "spark-historyserver", + "INSTALL": [ + "spark-historyserver" + ], + "PATCH": [ + "spark-historyserver" + ], + "STANDARD": [ + "spark-historyserver" + ] + }, + "SPARK_THRIFTSERVER": { + "STACK-SELECT-PACKAGE": 
"spark-thriftserver", + "INSTALL": [ + "spark-thriftserver" + ], + "PATCH": [ + "spark-thriftserver" + ], + "STANDARD": [ + "spark-thriftserver" + ] + } + }, + "FLINK": { + "FLINK_CLIENT": { + "STACK-SELECT-PACKAGE": "flink-client", + "INSTALL": [ + "flink-client" + ], + "PATCH": [ + "flink-client" + ], + "STANDARD": [ + "flink-client" + ] + }, + "FLINK_JOBHISTORYSERVER": { + "STACK-SELECT-PACKAGE": "flink-historyserver", + "INSTALL": [ + "flink-historyserver" + ], + "PATCH": [ + "flink-historyserver" + ], + "STANDARD": [ + "flink-historyserver" + ] + } + }, + "SOLR": { + "SOLR_SERVER": { + "STACK-SELECT-PACKAGE": "solr-server", + "INSTALL": [ + "solr-server" + ], + "PATCH": [ + "solr-server" + ], + "STANDARD": [ + "solr-server" + ] + } + }, + "TEZ": { + "TEZ_CLIENT": { + "STACK-SELECT-PACKAGE": "tez-client", + "INSTALL": [ + "tez-client" + ], + "PATCH": [ + "tez-client" + ], + "STANDARD": [ + "hadoop-client" + ] + } + }, + "YARN": { + "NODEMANAGER": { + "STACK-SELECT-PACKAGE": "hadoop-yarn-nodemanager", + "INSTALL": [ + "hadoop-yarn-nodemanager" + ], + "PATCH": [ + "hadoop-yarn-nodemanager" + ], + "STANDARD": [ + "hadoop-yarn-nodemanager" + ] + }, + "RESOURCEMANAGER": { + "STACK-SELECT-PACKAGE": "hadoop-yarn-resourcemanager", + "INSTALL": [ + "hadoop-yarn-resourcemanager" + ], + "PATCH": [ + "hadoop-yarn-resourcemanager" + ], + "STANDARD": [ + "hadoop-yarn-resourcemanager" + ] + }, + "YARN_CLIENT": { + "STACK-SELECT-PACKAGE": "hadoop-yarn-client", + "INSTALL": [ + "hadoop-yarn-client" + ], + "PATCH": [ + "hadoop-yarn-client" + ], + "STANDARD": [ + "hadoop-client" + ] + } + }, + "ZEPPELIN": { + "ZEPPELIN_MASTER": { + "STACK-SELECT-PACKAGE": "zeppelin-server", + "INSTALL": [ + "zeppelin-server" + ], + "PATCH": [ + "zeppelin-server" + ], + "STANDARD": [ + "zeppelin-server" + ] + } + }, + "ZOOKEEPER": { + "ZOOKEEPER_CLIENT": { + "STACK-SELECT-PACKAGE": "zookeeper-client", + "INSTALL": [ + "zookeeper-client" + ], + "PATCH": [ + "zookeeper-client" + ], + "STANDARD": [ + 
"zookeeper-client" + ] + }, + "ZOOKEEPER_SERVER": { + "STACK-SELECT-PACKAGE": "zookeeper-server", + "INSTALL": [ + "zookeeper-server" + ], + "PATCH": [ + "zookeeper-server" + ], + "STANDARD": [ + "zookeeper-server" + ] + } + } + }, + "conf-select": { + "hadoop": [ + { + "conf_dir": "/etc/hadoop/conf", + "current_dir": "{0}/current/hadoop-client/conf", + "component": "hadoop-client" + } + ], + "hbase": [ + { + "conf_dir": "/etc/hbase/conf", + "current_dir": "{0}/current/hbase-client/conf", + "component": "hbase-client" + } + ], + "hive": [ + { + "conf_dir": "/etc/hive/conf", + "current_dir": "{0}/current/hive-client/conf", + "component": "hive-client" + } + ], + "hive-hcatalog": [ + { + "conf_dir": "/etc/hive-webhcat/conf", + "prefix": "/etc/hive-webhcat", + "current_dir": "{0}/current/hive-webhcat/etc/webhcat", + "component": "hive-webhcat" + }, + { + "conf_dir": "/etc/hive-hcatalog/conf", + "prefix": "/etc/hive-hcatalog", + "current_dir": "{0}/current/hive-webhcat/etc/hcatalog", + "component": "hive-webhcat" + } + ], + "kafka": [ + { + "conf_dir": "/etc/kafka/conf", + "current_dir": "{0}/current/kafka-broker/conf", + "component": "kafka-broker" + } + ], + "phoenix": [ + { + "conf_dir": "/etc/phoenix/conf", + "current_dir": "{0}/current/phoenix-client/conf", + "component": "phoenix-client" + } + ], + "spark": [ + { + "conf_dir": "/etc/spark/conf", + "current_dir": "{0}/current/spark-client/conf", + "component": "spark-client" + } + ], + "flink": [ + { + "conf_dir": "/etc/flink/conf", + "current_dir": "{0}/current/flink-client/conf", + "component": "flink-client" + } + ], + "solr": [ + { + "conf_dir": "/etc/solr/conf", + "current_dir": "{0}/current/solr-server/conf", + "component": "solr-server" + } + ], + "tez": [ + { + "conf_dir": "/etc/tez/conf", + "current_dir": "{0}/current/tez-client/conf", + "component": "tez-client" + } + ], + "zeppelin": [ + { + "conf_dir": "/etc/zeppelin/conf", + "current_dir": "{0}/current/zeppelin-server/conf", + "component": 
"zeppelin-server" + } + ], + "zookeeper": [ + { + "conf_dir": "/etc/zookeeper/conf", + "current_dir": "{0}/current/zookeeper-client/conf", + "component": "zookeeper-client" + } + ] + }, + "conf-select-patching": { + "HBASE": { + "packages": [ + "hbase" + ] + }, + "HDFS": { + "packages": [] + }, + "HIVE": { + "packages": [ + "hive", + "hive-hcatalog" + ] + }, + "KAFKA": { + "packages": [ + "kafka" + ] + }, + "MAPREDUCE2": { + "packages": [] + }, + "SPARK": { + "packages": [ + "spark" + ] + }, + "FLINK": { + "packages": [ + "flink" + ] + }, + "SOLR": { + "packages": [ + "solr" + ] + }, + "TEZ": { + "packages": [ + "tez" + ] + }, + "YARN": { + "packages": [] + }, + "ZEPPELIN": { + "packages": [ + "zeppelin" + ] + }, + "ZOOKEEPER": { + "packages": [ + "zookeeper" + ] + } + }, + "upgrade-dependencies": { + "HIVE": [ + "TEZ", + "MAPREDUCE2" + ], + "TEZ": [ + "HIVE" + ], + "MAPREDUCE2": [ + "HIVE" + ] + } + } +} \ No newline at end of file diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/properties/stack_tools.json b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/properties/stack_tools.json index ba83c2e740..f2afcd738b 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/properties/stack_tools.json +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/properties/stack_tools.json @@ -2,13 +2,13 @@ "BIGTOP": { "stack_selector": [ "distro-select", - "/usr/bin/distro-select", - "distro-select" + "/usr/lib/bigtop-select/distro-select", + "bigtop-select" ], "conf_selector": [ "conf-select", - "/usr/bin/conf-select", + "/usr/lib/bigtop-select/conf-select", "conf-select" ] } -} +} \ No newline at end of file diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/KAFKA/package/scripts/kafka_broker.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/KAFKA/package/scripts/kafka_broker.py index b59b38c9f9..697471896d 100644 --- 
a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/KAFKA/package/scripts/kafka_broker.py +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/KAFKA/package/scripts/kafka_broker.py @@ -77,7 +77,7 @@ class KafkaBroker(Script): kafka_kinit_cmd = format("{kinit_path_local} -kt {kafka_keytab_path} {kafka_jaas_principal};") Execute(kafka_kinit_cmd, user=params.kafka_user) - daemon_cmd = format('source {params.conf_dir}/kafka-env.sh ; {params.kafka_start_cmd}>>/dev/null 2>>{params.kafka_err_file} & echo $!>{params.kafka_pid_file}') + daemon_cmd = format('source {params.conf_dir}/kafka-env.sh ; {params.kafka_start_cmd} >>/dev/null 2>>{params.kafka_err_file} & echo $!>{params.kafka_pid_file}') no_op_test = format('ls {params.kafka_pid_file}>/dev/null 2>&1 && ps -p `cat {params.kafka_pid_file}`>/dev/null 2>&1') try: Execute(daemon_cmd, diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/KAFKA/package/scripts/params.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/KAFKA/package/scripts/params.py index d407c602f7..f68442ef6e 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/KAFKA/package/scripts/params.py +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/KAFKA/package/scripts/params.py @@ -32,6 +32,7 @@ from resource_management.libraries.functions import conf_select from resource_management.libraries.functions import upgrade_summary from resource_management.libraries.functions import get_kinit_path from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources +from resource_management.libraries.functions.format import format # server configurations config = Script.get_config() @@ -58,9 +59,6 @@ hostname = config['agentLevelParams']['hostname'] # default kafka parameters kafka_home = '/usr/lib/kafka' -kafka_start_cmd = kafka_home + "/bin/kafka-server-start.sh " + kafka_home + "/config/server.properties" -kafka_stop_cmd = 
kafka_home + "/bin/kafka-server-stop.sh " + kafka_home + "/config/server.properties" -kafka_bin = kafka_home + '/bin/kafka' conf_dir = "/etc/kafka/conf" limits_conf_dir = "/etc/security/limits.d" @@ -75,8 +73,9 @@ kafka_delete_topic_enable = default('/configurations/kafka-broker/delete.topic.e # parameters for 2.2+ if stack_version_formatted and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version_formatted): kafka_home = os.path.join(stack_root, "current", "kafka-broker") - kafka_bin = os.path.join(kafka_home, "bin", "kafka") - conf_dir = os.path.join(kafka_home, "config") + +kafka_start_cmd = format('{kafka_home}/bin/kafka-server-start.sh {conf_dir}/server.properties') +kafka_stop_cmd = format('{kafka_home}/bin/kafka-server-stop.sh {conf_dir}/server.properties') kafka_user = config['configurations']['kafka-env']['kafka_user'] kafka_log_dir = config['configurations']['kafka-env']['kafka_log_dir'] diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/ZOOKEEPER/package/scripts/params_linux.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/ZOOKEEPER/package/scripts/params_linux.py index de4073a433..2005ff810d 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/ZOOKEEPER/package/scripts/params_linux.py +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/ZOOKEEPER/package/scripts/params_linux.py @@ -55,7 +55,6 @@ if stack_version_formatted and check_stack_feature(StackFeature.ROLLING_UPGRADE, zk_home = format("{stack_root}/current/{component_directory}") zk_bin = format("{stack_root}/current/{component_directory}/bin") zk_cli_shell = format("{stack_root}/current/{component_directory}/bin/zkCli.sh") - config_dir = status_params.config_dir zk_user = config['configurations']['zookeeper-env']['zk_user'] diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/ZOOKEEPER/package/scripts/status_params.py 
b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/ZOOKEEPER/package/scripts/status_params.py index 5c56326c37..c74dbb1c29 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/ZOOKEEPER/package/scripts/status_params.py +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/ZOOKEEPER/package/scripts/status_params.py @@ -54,7 +54,4 @@ else: stack_version_formatted = format_stack_version(stack_version_unformatted) stack_root = Script.get_stack_root() - config_dir = "/etc/zookeeper/conf" - if stack_version_formatted and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version_formatted): - config_dir = format("{stack_root}/current/{component_directory}/conf") stack_name = default("/clusterLevelParams/stack_name", None) \ No newline at end of file diff --git a/ambari-server/src/test/python/TestVersion.py b/ambari-server/src/test/python/TestVersion.py index cf8dff92ff..d08cfbb260 100644 --- a/ambari-server/src/test/python/TestVersion.py +++ b/ambari-server/src/test/python/TestVersion.py @@ -37,8 +37,8 @@ class TestVersion(TestCase): self.version_module = imp.load_module('version', fp, test_file_path, ('.py', 'rb', imp.PY_SOURCE)) def test_format(self): - l = [("2.2", "2.2.0.0"), - ("2.2.1", "2.2.1.0"), + l = [("2.2", "2.2.0"), + ("2.2.1", "2.2.1"), ("2.2.1.3", "2.2.1.3")] for input, expected in l: @@ -51,13 +51,13 @@ class TestVersion(TestCase): def test_format_with_hyphens(self): actual = self.version_module.format_stack_version("FOO-1.0") - self.assertEqual("1.0.0.0", actual) + self.assertEqual("1.0.0", actual) actual = self.version_module.format_stack_version("1.0.0-1234") - self.assertEqual("1.0.0.0", actual) + self.assertEqual("1.0.0", actual) actual = self.version_module.format_stack_version("FOO-1.0-9999") - self.assertEqual("1.0.0.0", actual) + self.assertEqual("1.0.0", actual) def test_comparison(self): --------------------------------------------------------------------- To unsubscribe, e-mail: 
commits-unsubscribe@ambari.apache.org For additional commands, e-mail: commits-help@ambari.apache.org