http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/yarn.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/yarn.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/yarn.py index 6aff622..83e40c6 100644 --- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/yarn.py +++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/yarn.py @@ -78,7 +78,7 @@ def yarn(name = None): ) XmlConfig("core-site.xml", - conf_dir=params.config_dir, + conf_dir=params.hadoop_conf_dir, configurations=params.config['configurations']['core-site'], configuration_attributes=params.config['configuration_attributes']['core-site'], owner=params.hdfs_user, @@ -87,7 +87,7 @@ def yarn(name = None): ) XmlConfig("mapred-site.xml", - conf_dir=params.config_dir, + conf_dir=params.hadoop_conf_dir, configurations=params.config['configurations']['mapred-site'], configuration_attributes=params.config['configuration_attributes']['mapred-site'], owner=params.yarn_user, @@ -96,7 +96,7 @@ def yarn(name = None): ) XmlConfig("yarn-site.xml", - conf_dir=params.config_dir, + conf_dir=params.hadoop_conf_dir, configurations=params.config['configurations']['yarn-site'], configuration_attributes=params.config['configuration_attributes']['yarn-site'], owner=params.yarn_user, @@ -105,7 +105,7 @@ def yarn(name = None): ) XmlConfig("capacity-scheduler.xml", - conf_dir=params.config_dir, + conf_dir=params.hadoop_conf_dir, configurations=params.config['configurations']['capacity-scheduler'], configuration_attributes=params.config['configuration_attributes']['capacity-scheduler'], owner=params.yarn_user, @@ -140,7 +140,7 @@ def yarn(name = None): content=Template('mapreduce.conf.j2') ) - File(format("{config_dir}/yarn-env.sh"), + File(format("{hadoop_conf_dir}/yarn-env.sh"), owner=params.yarn_user, group=params.user_group, mode=0755, @@ -154,7 +154,7 @@ def yarn(name = None): mode=06050 ) - File(format("{config_dir}/container-executor.cfg"), + File(format("{hadoop_conf_dir}/container-executor.cfg"), group=params.user_group, mode=0644, content=Template('container-executor.cfg.j2') @@ -168,7 +168,7 @@ def yarn(name = None): tc_mode = None tc_owner = params.hdfs_user - File(format("{config_dir}/mapred-env.sh"), + File(format("{hadoop_conf_dir}/mapred-env.sh"), owner=tc_owner, content=InlineTemplate(params.mapred_env_sh_template) )
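For context: the conf_dir to hadoop_conf_dir substitutions above rely on the RPM-versioning pattern that the params.py diffs below introduce, where a new rpm_version property switches every path between the legacy /usr/lib layout and the versioned /usr/hdp/<version> layout. A minimal sketch of that pattern, assuming the Script, default, and format helpers exposed by Ambari's resource_management package (the same helpers used in the diffs themselves); this is an illustrative sketch, not the literal committed code:

from resource_management import *  # provides Script, default, format

config = Script.get_config()

# rpm_version is only defined on stacks (HDP 2.2+) whose hadoop-env declares it.
rpm_version = default("/configurations/hadoop-env/rpm_version", None)

if rpm_version is not None:
  # Versioned RPM layout: configs and binaries live under /usr/hdp/<version>/...
  hadoop_conf_dir = format("/usr/hdp/{rpm_version}/etc/hadoop/conf")
  hadoop_bin_dir = format("/usr/hdp/{rpm_version}/hadoop/bin")
else:
  # Legacy layout used by the earlier HDP stacks.
  hadoop_conf_dir = "/etc/hadoop/conf"
  hadoop_bin_dir = "/usr/bin"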
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/params.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/params.py index 2dc3792..6016b99 100644 --- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/params.py +++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/params.py @@ -26,15 +26,24 @@ import status_params config = Script.get_config() tmp_dir = Script.get_tmp_dir() -config_dir = "/etc/zookeeper/conf" +#RPM versioning support +rpm_version = default("/configurations/hadoop-env/rpm_version", None) + +#hadoop params +if rpm_version is not None: + config_dir = format('/usr/hdp/{rpm_version}/etc/zookeeper/conf') + zk_bin = format('/usr/hdp/{rpm_version}/zookeeper/bin') + smoke_script = format('/usr/hdp/{rpm_version}/zookeeper/bin/zkCli.sh') +else: + config_dir = "/etc/zookeeper/conf" + zk_bin = '/usr/lib/zookeeper/bin' + smoke_script = "/usr/lib/zookeeper/bin/zkCli.sh" + zk_user = config['configurations']['zookeeper-env']['zk_user'] hostname = config['hostname'] -zk_bin = '/usr/lib/zookeeper/bin' user_group = config['configurations']['cluster-env']['user_group'] zk_env_sh_template = config['configurations']['zookeeper-env']['content'] -smoke_script = "/usr/lib/zookeeper/bin/zkCli.sh" - zk_log_dir = config['configurations']['zookeeper-env']['zk_log_dir'] zk_data_dir = config['configurations']['zookeeper-env']['zk_data_dir'] zk_pid_dir = status_params.zk_pid_dir http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.1/services/FALCON/package/scripts/params.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1/services/FALCON/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.1/services/FALCON/package/scripts/params.py index 7a61c8a..79bdef3 100644 --- a/ambari-server/src/main/resources/stacks/HDP/2.1/services/FALCON/package/scripts/params.py +++ b/ambari-server/src/main/resources/stacks/HDP/2.1/services/FALCON/package/scripts/params.py @@ -23,6 +23,17 @@ from status_params import * config = Script.get_config() +#RPM versioning support +rpm_version = default("/configurations/hadoop-env/rpm_version", None) + +#hadoop params +if rpm_version is not None: + hadoop_conf_dir = format("/usr/hdp/{rpm_version}/etc/hadoop/conf") + hadoop_bin_dir = format("/usr/hdp/{rpm_version}/hadoop/bin") +else: + hadoop_conf_dir = "/etc/hadoop/conf" + hadoop_bin_dir = "/usr/bin" + oozie_user = config['configurations']['oozie-env']['oozie_user'] falcon_user = config['configurations']['falcon-env']['falcon_user'] smoke_user = config['configurations']['cluster-env']['smokeuser'] @@ -53,7 +64,6 @@ flacon_apps_dir = '/apps/falcon' #for create_hdfs_directory security_enabled = config['configurations']['cluster-env']['security_enabled'] hostname = config["hostname"] -hadoop_conf_dir = "/etc/hadoop/conf" hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab'] hdfs_user = config['configurations']['hadoop-env']['hdfs_user'] hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name'] @@ -67,5 +77,6 @@ HdfsDirectory = functools.partial( 
hdfs_user=hdfs_user, security_enabled = security_enabled, keytab = hdfs_user_keytab, - kinit_path_local = kinit_path_local + kinit_path_local = kinit_path_local, + bin_dir = hadoop_bin_dir ) http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.1/services/STORM/package/scripts/params.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1/services/STORM/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.1/services/STORM/package/scripts/params.py index 19668c7..7115de4 100644 --- a/ambari-server/src/main/resources/stacks/HDP/2.1/services/STORM/package/scripts/params.py +++ b/ambari-server/src/main/resources/stacks/HDP/2.1/services/STORM/package/scripts/params.py @@ -37,8 +37,7 @@ nimbus_host = config['configurations']['storm-site']['nimbus.host'] rest_api_port = "8745" rest_api_admin_port = "8746" rest_api_conf_file = format("{conf_dir}/config.yaml") -rest_lib_dir = "/usr/lib/storm/contrib/storm-rest" -java_home = config['hostLevelParams']['java_home'] +rest_lib_dir = default("/configurations/storm-env/rest_lib_dir","/usr/lib/storm/contrib/storm-rest") storm_env_sh_template = config['configurations']['storm-env']['content'] if 'ganglia_server_host' in config['clusterHostInfo'] and \ @@ -48,7 +47,7 @@ if 'ganglia_server_host' in config['clusterHostInfo'] and \ ganglia_report_interval = 60 else: ganglia_installed = False - + security_enabled = config['configurations']['cluster-env']['security_enabled'] if security_enabled: http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.2/metainfo.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/metainfo.xml new file mode 100644 index 0000000..0be6cb6 --- /dev/null +++ b/ambari-server/src/main/resources/stacks/HDP/2.2/metainfo.xml @@ -0,0 +1,23 @@ +<?xml version="1.0"?> +<!-- + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+--> +<metainfo> + <versions> + <active>true</active> + </versions> + <extends>2.1</extends> +</metainfo> http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.2/repos/repoinfo.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/repos/repoinfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/repos/repoinfo.xml new file mode 100644 index 0000000..c99f92a --- /dev/null +++ b/ambari-server/src/main/resources/stacks/HDP/2.2/repos/repoinfo.xml @@ -0,0 +1,82 @@ +<?xml version="1.0"?> +<!-- + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +--> +<reposinfo> + <os type="redhat6"> + <repo> + <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.9.9.9-98</baseurl> + <repoid>HDP-2.9.9.9-98</repoid> + <reponame>HDP</reponame> + </repo> + <repo> + <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.2.0.0</baseurl> + <repoid>HDP-2.2.0.0</repoid> + <reponame>HDP-2.2</reponame> + </repo> + <repo> + <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.17/repos/centos6</baseurl> + <repoid>HDP-UTILS-1.1.0.17</repoid> + <reponame>HDP-UTILS</reponame> + </repo> + </os> + <os type="redhat5"> + <repo> + <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos5/2.x/BUILDS/2.9.9.9-98</baseurl> + <repoid>HDP-2.9.9.9-98</repoid> + <reponame>HDP</reponame> + </repo> + <repo> + <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos5/2.x/updates/2.2.0.0</baseurl> + <repoid>HDP-2.2.0.0</repoid> + <reponame>HDP-2.2</reponame> + </repo> + <repo> + <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.17/repos/centos5</baseurl> + <repoid>HDP-UTILS-1.1.0.17</repoid> + <reponame>HDP-UTILS</reponame> + </repo> + </os> + <os type="suse11"> + <repo> + <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/suse11/2.x/BUILDS/2.9.9.9-98</baseurl> + <repoid>HDP-2.9.9.9-98</repoid> + <reponame>HDP</reponame> + </repo> + <repo> + <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/suse11/2.x/updates/2.2.0.0</baseurl> + <repoid>HDP-2.2.0.0</repoid> + <reponame>HDP-2.2</reponame> + </repo> + <repo> + <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.17/repos/suse11</baseurl> + <repoid>HDP-UTILS-1.1.0.17</repoid> + <reponame>HDP-UTILS</reponame> + </repo> + </os> + <os type="debian12"> + <repo> + <baseurl>REPLACE_WITH_UBUNTU12_URL</baseurl> + <repoid>HDP-2.1</repoid> + <reponame>HDP</reponame> + </repo> + <repo> + <baseurl>http://dev.hortonworks.com.s3.amazonaws.com/HDP-UTILS-1.1.0.19/repos/ubuntu12</baseurl> + <repoid>HDP-UTILS-1.1.0.19</repoid> + <reponame>HDP-UTILS</reponame> + </repo> + </os> +</reposinfo> 
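The FALCON params.py hunk earlier in this commit also threads the new hadoop_bin_dir into the HdfsDirectory partial, so every create/create_delayed call inherits the versioned bin directory. Below is a standalone illustration of that functools.partial idiom, using a hypothetical stand-in function rather than the real Ambari resource; the paths and version string are example values only:

import functools

def hdfs_directory(path, action, conf_dir, bin_dir, hdfs_user):
  # The real resource runs "hadoop --config <conf_dir> fs ..." using the
  # binaries in bin_dir; here we only print the command that would run.
  print("{0}/hadoop --config {1} fs -{2} {3} (as {4})".format(
      bin_dir, conf_dir, action, path, hdfs_user))

# Bind the keyword arguments that are common to every directory once.
HdfsDirectory = functools.partial(
  hdfs_directory,
  conf_dir="/etc/hadoop/conf",
  bin_dir="/usr/hdp/2.2.0.0/hadoop/bin",  # versioned path when rpm_version is set
  hdfs_user="hdfs",
)

# Call sites then only name the directory and the action, as in the diffs above.
HdfsDirectory("/apps/falcon", action="mkdir")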
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.2/role_command_order.json ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/role_command_order.json b/ambari-server/src/main/resources/stacks/HDP/2.2/role_command_order.json new file mode 100644 index 0000000..a6f3e07 --- /dev/null +++ b/ambari-server/src/main/resources/stacks/HDP/2.2/role_command_order.json @@ -0,0 +1,88 @@ +{ + "_comment" : "Record format:", + "_comment" : "blockedRole-blockedCommand: [blockerRole1-blockerCommand1, blockerRole2-blockerCommand2, ...]", + "general_deps" : { + "_comment" : "dependencies for all cases", + "NAGIOS_SERVER-INSTALL" : ["HIVE_CLIENT-INSTALL", "HCAT-INSTALL", + "MAPREDUCE_CLIENT-INSTALL", "OOZIE_CLIENT-INSTALL"], + "NIMBUS-START" : ["ZOOKEEPER_SERVER-START", "NODEMANAGER-START", "RESOURCEMANAGER-START"], + "SUPERVISOR-START" : ["NIMBUS-START"], + "STORM_UI_SERVER-START" : ["NIMBUS-START"], + "DRPC_SERVER-START" : ["NIMBUS-START"], + "STORM_REST_API-START" : ["NIMBUS-START", "STORM_UI_SERVER-START", "SUPERVISOR-START", "DRPC_SERVER-START"], + "HBASE_MASTER-START": ["ZOOKEEPER_SERVER-START"], + "HBASE_REGIONSERVER-START": ["HBASE_MASTER-START"], + "OOZIE_SERVER-START": ["NODEMANAGER-START", "RESOURCEMANAGER-START"], + "WEBHCAT_SERVER-START": ["NODEMANAGER-START", "HIVE_SERVER-START"], + "HIVE_METASTORE-START": ["MYSQL_SERVER-START"], + "HIVE_SERVER-START": ["NODEMANAGER-START", "MYSQL_SERVER-START"], + "HUE_SERVER-START": ["HIVE_SERVER-START", "HCAT-START", "OOZIE_SERVER-START"], + "FLUME_HANDLER-START": ["OOZIE_SERVER-START"], + "FALCON_SERVER-START": ["NAMENODE-START", "DATANODE-START", "OOZIE_SERVER-START"], + "NAGIOS_SERVER-START": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START", + "GANGLIA_SERVER-START", "GANGLIA_MONITOR-START", "HCAT-START", + "HIVE_SERVER-START", "HIVE_METASTORE-START", "HUE_SERVER-START", + "NODEMANAGER-START", "RESOURCEMANAGER-START", "ZOOKEEPER_SERVER-START", + "MYSQL_SERVER-START", "OOZIE_SERVER-START", "PIG-START", "SQOOP-START", + "WEBHCAT_SERVER-START", "FLUME_HANDLER-START"], + "MAPREDUCE_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"], + "OOZIE_SERVICE_CHECK-SERVICE_CHECK": ["OOZIE_SERVER-START"], + "WEBHCAT_SERVICE_CHECK-SERVICE_CHECK": ["WEBHCAT_SERVER-START"], + "HBASE_SERVICE_CHECK-SERVICE_CHECK": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START"], + "HIVE_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START", "HIVE_METASTORE-START"], + "HCAT_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START"], + "PIG_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"], + "SQOOP_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"], + "ZOOKEEPER_SERVICE_CHECK-SERVICE_CHECK": ["ZOOKEEPER_SERVER-START"], + "ZOOKEEPER_QUORUM_SERVICE_CHECK-SERVICE_CHECK": ["ZOOKEEPER_SERVER-START"], + "STORM_SERVICE_CHECK-SERVICE_CHECK": ["NIMBUS-START", "SUPERVISOR-START", "STORM_UI_SERVER-START", + "DRPC_SERVER-START"], + "FLUME_SERVICE_CHECK-SERVICE_CHECK": ["FLUME_HANDLER-START"], + "FALCON_SERVICE_CHECK-SERVICE_CHECK": ["FALCON_SERVER-START"], + "ZOOKEEPER_SERVER-STOP" : ["HBASE_MASTER-STOP", "HBASE_REGIONSERVER-STOP"], + "HBASE_MASTER-STOP": ["HBASE_REGIONSERVER-STOP"], + "NIMBUS-STOP" : ["SUPERVISOR-STOP", "STORM_UI_SERVER-STOP", "DRPC_SERVER-STOP"] + }, + "_comment" : "GLUSTERFS-specific dependencies", + "optional_glusterfs": { + "HBASE_MASTER-START": ["PEERSTATUS-START"], + 
"GLUSTERFS_SERVICE_CHECK-SERVICE_CHECK": ["PEERSTATUS-START"] + }, + "_comment" : "Dependencies that are used when GLUSTERFS is not present in cluster", + "optional_no_glusterfs": { + "SECONDARY_NAMENODE-START": ["NAMENODE-START"], + "RESOURCEMANAGER-START": ["NAMENODE-START", "DATANODE-START"], + "NODEMANAGER-START": ["NAMENODE-START", "DATANODE-START", "RESOURCEMANAGER-START"], + "HISTORYSERVER-START": ["NAMENODE-START", "DATANODE-START"], + "HBASE_MASTER-START": ["NAMENODE-START", "DATANODE-START"], + "APP_TIMELINE_SERVER-START": ["NAMENODE-START", "DATANODE-START"], + "FALCON_SERVER-START": ["NAMENODE-START", "DATANODE-START"], + "FALCON_SERVICE_CHECK-SERVICE_CHECK": ["FALCON_SERVER-START"], + "HIVE_SERVER-START": ["DATANODE-START"], + "WEBHCAT_SERVER-START": ["DATANODE-START"], + "NAGIOS_SERVER-START": ["NAMENODE-START", "SECONDARY_NAMENODE-START", + "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START", "HISTORYSERVER-START"], + "HDFS_SERVICE_CHECK-SERVICE_CHECK": ["NAMENODE-START", "DATANODE-START", + "SECONDARY_NAMENODE-START"], + "MAPREDUCE2_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", + "RESOURCEMANAGER-START", "HISTORYSERVER-START", "YARN_SERVICE_CHECK-SERVICE_CHECK"], + "YARN_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"], + "RESOURCEMANAGER_SERVICE_CHECK-SERVICE_CHECK": ["RESOURCEMANAGER-START"], + "PIG_SERVICE_CHECK-SERVICE_CHECK": ["RESOURCEMANAGER-START", "NODEMANAGER-START"], + "NAMENODE-STOP": ["RESOURCEMANAGER-STOP", "NODEMANAGER-STOP", + "HISTORYSERVER-STOP", "HBASE_MASTER-STOP", "FALCON_SERVER-STOP"], + "DATANODE-STOP": ["RESOURCEMANAGER-STOP", "NODEMANAGER-STOP", + "HISTORYSERVER-STOP", "HBASE_MASTER-STOP", "FALCON_SERVER-STOP"] + }, + "_comment" : "Dependencies that are used in HA NameNode cluster", + "namenode_optional_ha": { + "NAMENODE-START": ["ZKFC-START", "JOURNALNODE-START", "ZOOKEEPER_SERVER-START"], + "ZKFC-START": ["ZOOKEEPER_SERVER-START"], + "NAGIOS_SERVER-START": ["ZKFC-START", "JOURNALNODE-START"] + }, + "_comment" : "Dependencies that are used in ResourceManager HA cluster", + "resourcemanager_optional_ha" : { + "RESOURCEMANAGER-START": ["ZOOKEEPER_SERVER-START"] + } +} + http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.2/services/FALCON/metainfo.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/FALCON/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/FALCON/metainfo.xml new file mode 100644 index 0000000..4a46139 --- /dev/null +++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/FALCON/metainfo.xml @@ -0,0 +1,28 @@ +<?xml version="1.0"?> +<!-- + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+--> +<metainfo> + <schemaVersion>2.0</schemaVersion> + <services> + <service> + <name>FALCON</name> + <displayName>Falcon</displayName> + <comment>Data management and processing platform</comment> + <version>0.6.0.2.2.0.0</version> + </service> + </services> +</metainfo> http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.2/services/FLUME/metainfo.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/FLUME/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/FLUME/metainfo.xml new file mode 100644 index 0000000..6b702c8 --- /dev/null +++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/FLUME/metainfo.xml @@ -0,0 +1,40 @@ +<?xml version="1.0"?> +<!-- + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +--> +<metainfo> + <schemaVersion>2.0</schemaVersion> + <services> + <service> + <name>FLUME</name> + <displayName>Flume</displayName> + <comment>Data management and processing platform</comment> + <version>1.5.0.1.2.9.9.9</version> + + <osSpecifics> + <osSpecific> + <osFamily>any</osFamily> + <packages> + <package> + <name>flume_2_9_9_9_98</name> + </package> + </packages> + </osSpecific> + </osSpecifics> + + </service> + </services> +</metainfo> http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/metainfo.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/metainfo.xml new file mode 100644 index 0000000..52cd10d --- /dev/null +++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/metainfo.xml @@ -0,0 +1,42 @@ +<?xml version="1.0"?> +<!-- + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+--> +<metainfo> + <schemaVersion>2.0</schemaVersion> + <services> + <service> + <name>HBASE</name> + <displayName>HBase</displayName> + <comment>Non-relational distributed database and centralized service for configuration management & + synchronization + </comment> + <version>0.98.4.2.9.9.9</version> + + <osSpecifics> + <osSpecific> + <osFamily>any</osFamily> + <packages> + <package> + <name>hbase_2_9_9_9_98</name> + </package> + </packages> + </osSpecific> + </osSpecifics> + + </service> + </services> +</metainfo> http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/hadoop-env.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/hadoop-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/hadoop-env.xml new file mode 100644 index 0000000..3213506 --- /dev/null +++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/hadoop-env.xml @@ -0,0 +1,29 @@ +<?xml version="1.0"?> +<?xml-stylesheet type="text/xsl" href="configuration.xsl"?> +<!-- +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +--> + +<configuration> + <property> + <name>rpm_version</name> + <value>2.9.9.9-98</value> + <description>Hadoop RPM version</description> + </property> +</configuration> http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/hdfs-site.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/hdfs-site.xml new file mode 100644 index 0000000..4f46cb7 --- /dev/null +++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/hdfs-site.xml @@ -0,0 +1,34 @@ +<?xml version="1.0"?> +<?xml-stylesheet type="text/xsl" href="configuration.xsl"?> + +<!-- + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. +--> + +<!-- Put site-specific property overrides in this file. --> + +<configuration supports_final="true"> + + <property> + <name>dfs.hosts.exclude</name> + <value>/usr/hdp/2.9.9.9-98/etc/hadoop/conf/dfs.exclude</value> + <description>Names a file that contains a list of hosts that are + not permitted to connect to the namenode. The full pathname of the + file must be specified. If the value is empty, no hosts are + excluded.</description> + </property> + +</configuration> http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/metainfo.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/metainfo.xml new file mode 100644 index 0000000..b520a34 --- /dev/null +++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/metainfo.xml @@ -0,0 +1,68 @@ +<?xml version="1.0"?> +<!-- + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +--> +<metainfo> + <schemaVersion>2.0</schemaVersion> + <services> + <service> + <name>HDFS</name> + <displayName>HDFS</displayName> + <comment>Apache Hadoop Distributed File System</comment> + <version>2.6.0.2.9.9.9</version> + + <osSpecifics> + <osSpecific> + <osFamily>any</osFamily> + <packages> + <package> + <name>hadoop_2_9_9_9_98</name> + </package> + <package> + <name>hadoop-lzo</name> + </package> + </packages> + </osSpecific> + + <osSpecific> + <osFamily>redhat5,redhat6,suse11</osFamily> + <packages> + <package> + <name>snappy</name> + </package> + <package> + <name>snappy-devel</name> + </package> + <package> + <name>lzo</name> + </package> + <package> + <name>hadoop-lzo-native</name> + </package> + <package> + <name>hadoop_2_9_9_9_98-libhdfs</name> + </package> + <package> + <name>ambari-log4j</name> + </package> + </packages> + </osSpecific> + + </osSpecifics> + + </service> + </services> +</metainfo> http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/metainfo.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/metainfo.xml new file mode 100644 index 0000000..28567a7 --- /dev/null +++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/metainfo.xml @@ -0,0 +1,44 @@ +<?xml version="1.0"?> +<!-- + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. 
See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +--> +<metainfo> + <schemaVersion>2.0</schemaVersion> + <services> + <service> + <name>HIVE</name> + <comment>Data warehouse system for ad-hoc queries & analysis of large datasets and table & storage management service</comment> + <version>0.14.0.2.9.9.9</version> + </service> + + <service> + <name>HCATALOG</name> + <comment>This is comment for HCATALOG service</comment> + <version>0.14.0.2.9.9.9</version> + <osSpecifics> + <osSpecific> + <osFamily>any</osFamily> + <packages> + <package> + <name>hive_2_9_9_9_98-hcatalog</name> + </package> + </packages> + </osSpecific> + </osSpecifics> + </service> + + </services> +</metainfo> http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.2/services/OOZIE/configuration/oozie-site.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/OOZIE/configuration/oozie-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/OOZIE/configuration/oozie-site.xml new file mode 100644 index 0000000..d39f542 --- /dev/null +++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/OOZIE/configuration/oozie-site.xml @@ -0,0 +1,38 @@ +<?xml version="1.0"?> +<!-- + Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +--> + +<configuration supports_final="true"> + + <property> + <name>oozie.service.HadoopAccessorService.hadoop.configurations</name> + <value>*=/usr/hdp/2.9.9.9-98/etc/hadoop/conf</value> + <description> + Comma separated AUTHORITY=HADOOP_CONF_DIR, where AUTHORITY is the HOST:PORT of + the Hadoop service (JobTracker, HDFS). The wildcard '*' configuration is + used when there is no exact match for an authority. The HADOOP_CONF_DIR contains + the relevant Hadoop *-site.xml files. If the path is relative is looked within + the Oozie configuration directory; though the path can be absolute (i.e. to point + to Hadoop client conf/ directories in the local filesystem. 
+ </description> + </property> + + + + +</configuration> http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.2/services/OOZIE/metainfo.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/OOZIE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/OOZIE/metainfo.xml new file mode 100644 index 0000000..5c77061 --- /dev/null +++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/OOZIE/metainfo.xml @@ -0,0 +1,28 @@ +<?xml version="1.0"?> +<!-- + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +--> +<metainfo> + <schemaVersion>2.0</schemaVersion> + <services> + <service> + <name>OOZIE</name> + <comment>System for workflow coordination and execution of Apache Hadoop jobs. This also includes the installation of the optional Oozie Web Console which relies on and will install the <a target="_blank" href="http://www.sencha.com/legal/open-source-faq/">ExtJS</a> Library. + </comment> + <version>4.1.0.2.2.0.0</version> + </service> + </services> +</metainfo> http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.2/services/PIG/metainfo.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/PIG/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/PIG/metainfo.xml new file mode 100644 index 0000000..335993f --- /dev/null +++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/PIG/metainfo.xml @@ -0,0 +1,41 @@ +<?xml version="1.0"?> +<!-- + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+--> +<metainfo> + <schemaVersion>2.0</schemaVersion> + <services> + <service> + <name>PIG</name> + <displayName>Pig</displayName> + <comment>Scripting platform for analyzing large datasets</comment> + <version>0.14.0.2.9.9.9</version> + + <osSpecifics> + <osSpecific> + <osFamily>any</osFamily> + <packages> + <package> + <name>pig_2_9_9_9_98</name> + </package> + </packages> + </osSpecific> + </osSpecifics> + + + </service> + </services> +</metainfo> http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.2/services/SQOOP/metainfo.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/SQOOP/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/SQOOP/metainfo.xml new file mode 100644 index 0000000..f644d74 --- /dev/null +++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/SQOOP/metainfo.xml @@ -0,0 +1,29 @@ +<?xml version="1.0"?> +<!-- + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +--> +<metainfo> + <schemaVersion>2.0</schemaVersion> + <services> + <service> + <name>SQOOP</name> + <comment>Tool for transferring bulk data between Apache Hadoop and + structured data stores such as relational databases + </comment> + <version>1.4.5.2.2</version> + </service> + </services> +</metainfo> http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.2/services/STORM/configuration/storm-env.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/STORM/configuration/storm-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/STORM/configuration/storm-env.xml new file mode 100644 index 0000000..6b2b550 --- /dev/null +++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/STORM/configuration/storm-env.xml @@ -0,0 +1,29 @@ +<?xml version="1.0"?> +<?xml-stylesheet type="text/xsl" href="configuration.xsl"?> +<!-- +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +--> + +<configuration> + <property> + <name>rest_lib_dir</name> + <value>/usr/lib/storm/external/storm-rest</value> + <description></description> + </property> +</configuration> http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.2/services/STORM/configuration/storm-site.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/STORM/configuration/storm-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/STORM/configuration/storm-site.xml new file mode 100644 index 0000000..396af4a --- /dev/null +++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/STORM/configuration/storm-site.xml @@ -0,0 +1,54 @@ +<?xml version="1.0"?> +<?xml-stylesheet type="text/xsl" href="configuration.xsl"?> +<!-- +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +--> + +<configuration supports_final="true"> + + + <property> + <name>nimbus.childopts</name> + <value>-Xmx1024m -Djava.security.auth.login.config=/etc/storm/conf/storm_jaas.conf -javaagent:/usr/lib/storm/external/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8649,wireformat31x=true,mode=multicast,config=/usr/lib/storm/external/storm-jmxetric/conf/jmxetric-conf.xml,process=Nimbus_JVM</value> + <description>This parameter is used by the storm-deploy project to configure the jvm options for the nimbus daemon.</description> + </property> + + <property> + <name>worker.childopts</name> + <value>-Xmx768m -javaagent:/usr/lib/storm/external/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8650,wireformat31x=true,mode=multicast,config=/usr/lib/storm/external/storm-jmxetric/conf/jmxetric-conf.xml,process=Worker_%ID%_JVM</value> + <description>The jvm opts provided to workers launched by this supervisor. 
All \"%ID%\" substrings are replaced with an identifier for this worker.</description> + </property> + + + + <property> + <name>ui.childopts</name> + <value>-Xmx768m -Djava.security.auth.login.config=/etc/storm/conf/storm_jaas.conf</value> + <description>Childopts for Storm UI Java process.</description> + </property> + + <property> + <name>supervisor.childopts</name> + <value>-Xmx256m -Djava.security.auth.login.config=/etc/storm/conf/storm_jaas.conf -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.port=56431 -javaagent:/usr/lib/storm/external/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8650,wireformat31x=true,mode=multicast,config=/usr/lib/storm/external/storm-jmxetric/conf/jmxetric-conf.xml,process=Supervisor_JVM</value> + <description>This parameter is used by the storm-deploy project to configure the jvm options for the supervisor daemon.</description> + </property> + + + +</configuration> http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.2/services/STORM/metainfo.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/STORM/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/STORM/metainfo.xml new file mode 100644 index 0000000..c25718d --- /dev/null +++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/STORM/metainfo.xml @@ -0,0 +1,29 @@ +<?xml version="1.0"?> +<!-- + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +--> + +<metainfo> + <schemaVersion>2.0</schemaVersion> + <services> + <service> + <name>STORM</name> + <displayName>Storm</displayName> + <comment>Apache Hadoop Stream processing framework</comment> + <version>0.9.3.2.2.0.0</version> + </service> + </services> +</metainfo> http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.2/services/TEZ/metainfo.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/TEZ/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/TEZ/metainfo.xml new file mode 100644 index 0000000..25f579a --- /dev/null +++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/TEZ/metainfo.xml @@ -0,0 +1,40 @@ +<?xml version="1.0"?> +<!-- + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. 
+ The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +--> +<metainfo> + <schemaVersion>2.0</schemaVersion> + <services> + <service> + <name>TEZ</name> + <displayName>Tez</displayName> + <comment>Tez is the next generation Hadoop Query Processing framework written on top of YARN.</comment> + <version>0.6.0.2.9.9.9</version> + + <osSpecifics> + <osSpecific> + <osFamily>any</osFamily> + <packages> + <package> + <name>tez_2_9_9_9_98</name> + </package> + </packages> + </osSpecific> + </osSpecifics> + + </service> + </services> +</metainfo> http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.2/services/WEBHCAT/configuration/webhcat-site.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/WEBHCAT/configuration/webhcat-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/WEBHCAT/configuration/webhcat-site.xml new file mode 100644 index 0000000..d14be36 --- /dev/null +++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/WEBHCAT/configuration/webhcat-site.xml @@ -0,0 +1,59 @@ +<?xml version="1.0" encoding="UTF-8"?> +<!-- +Licensed to the Apache Software Foundation (ASF) under one +or more contributor license agreements. See the NOTICE file +distributed with this work for additional information +regarding copyright ownership. The ASF licenses this file +to you under the Apache License, Version 2.0 (the +"License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +--> + +<!-- The default settings for Templeton. --> +<!-- Edit templeton-site.xml to change settings for your local --> +<!-- install. 
--> + +<configuration supports_final="true"> + + <property> + <name>templeton.hadoop.conf.dir</name> + <value>/usr/hdp/2.9.9.9-98/etc/hadoop/conf</value> + <description>The path to the Hadoop configuration.</description> + </property> + + <property> + <name>templeton.jar</name> + <value>/usr/hdp/2.9.9.9-98/hcatalog/share/webhcat/svr/webhcat.jar</value> + <description>The path to the Templeton jar file.</description> + </property> + + <property> + <name>templeton.libjars</name> + <value>/usr/hdp/2.9.9.9-98/zookeeper/zookeeper.jar</value> + <description>Jars to add the the classpath.</description> + </property> + + + <property> + <name>templeton.hadoop</name> + <value>/usr/hdp/2.9.9.9-98/hadoop/bin/hadoop</value> + <description>The path to the Hadoop executable.</description> + </property> + + + <property> + <name>templeton.hcat</name> + <value>/usr/hdp/2.9.9.9-98/hive/bin/hcat</value> + <description>The path to the hcatalog executable.</description> + </property> + + +</configuration> http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.2/services/WEBHCAT/metainfo.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/WEBHCAT/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/WEBHCAT/metainfo.xml new file mode 100644 index 0000000..a05f9e7 --- /dev/null +++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/WEBHCAT/metainfo.xml @@ -0,0 +1,44 @@ +<?xml version="1.0"?> +<!-- + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+--> +<metainfo> + <schemaVersion>2.0</schemaVersion> + <services> + <service> + <name>WEBHCAT</name> + <comment>This is comment for WEBHCAT service</comment> + <version>0.14.0.2.9.9.9</version> + <osSpecifics> + <osSpecific> + <osFamily>any</osFamily> + <packages> + <package> + <name>hive_2_9_9_9_98-webhcat</name> + </package> + <package> + <name>webhcat-tar-hive</name> + </package> + <package> + <name>webhcat-tar-pig</name> + </package> + </packages> + </osSpecific> + </osSpecifics> + + </service> + </services> +</metainfo> http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/configuration-mapred/mapred-site.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/configuration-mapred/mapred-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/configuration-mapred/mapred-site.xml new file mode 100644 index 0000000..a831936 --- /dev/null +++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/configuration-mapred/mapred-site.xml @@ -0,0 +1,36 @@ +<?xml version="1.0"?> +<?xml-stylesheet type="text/xsl" href="configuration.xsl"?> + +<!-- + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +--> + +<!-- Put site-specific property overrides in this file. --> + +<configuration supports_final="true" xmlns:xi="http://www.w3.org/2001/XInclude"> + + <property> + <name>mapreduce.admin.user.env</name> + <value>LD_LIBRARY_PATH=/usr/lib/hadoop/lib/native:/usr/hdp/2.9.9.9-98/hadoop/lib/native/Linux-amd64-64</value> + <description> + Additional execution environment entries for map and reduce task processes. + This is not an additive property. You must preserve the original value if + you want your map and reduce tasks to have access to native libraries (compression, etc) + </description> + </property> + + +</configuration> http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/configuration/yarn-site.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/configuration/yarn-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/configuration/yarn-site.xml new file mode 100644 index 0000000..065f57e --- /dev/null +++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/configuration/yarn-site.xml @@ -0,0 +1,35 @@ +<?xml version="1.0"?> +<?xml-stylesheet type="text/xsl" href="configuration.xsl"?> +<!-- + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. 
+ The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +--> + +<!-- Put site-specific property overrides in this file. --> + +<configuration supports_final="true" xmlns:xi="http://www.w3.org/2001/XInclude"> + + <property> + <name>yarn.resourcemanager.nodes.exclude-path</name> + <value>/usr/hdp/2.9.9.9-98/etc/hadoop/conf/yarn.exclude</value> + <description> + Names a file that contains a list of hosts that are + not permitted to connect to the resource manager. The full pathname of the + file must be specified. If the value is empty, no hosts are + excluded. + </description> + </property> + +</configuration> http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/metainfo.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/metainfo.xml new file mode 100644 index 0000000..7a30894 --- /dev/null +++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/metainfo.xml @@ -0,0 +1,71 @@ +<?xml version="1.0"?> +<!-- + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+--> + +<metainfo> + <schemaVersion>2.0</schemaVersion> + <services> + <service> + <name>YARN</name> + <displayName>YARN</displayName> + <comment>Apache Hadoop NextGen MapReduce (YARN)</comment> + <version>2.6.0.2.9.9.9</version> + <components> + <component> + <name>APP_TIMELINE_SERVER</name> + <cardinality>1</cardinality> + </component> + </components> + + <osSpecifics> + <osSpecific> + <osFamily>any</osFamily> + <packages> + <package> + <name>hadoop_2_9_9_9_98-yarn</name> + </package> + <package> + <name>hadoop_2_9_9_9_98-mapreduce</name> + </package> + </packages> + </osSpecific> + </osSpecifics> + </service> + + <service> + <name>MAPREDUCE2</name> + <displayName>MapReduce2</displayName> + <comment>Apache Hadoop NextGen MapReduce (YARN)</comment> + <version>2.6.0.2.9.9.9</version> + <osSpecifics> + <osSpecific> + <osFamily>any</osFamily> + <packages> + <package> + <name>hadoop_2_9_9_9_98-mapreduce</name> + </package> + </packages> + </osSpecific> + </osSpecifics> + <configuration-dir>configuration-mapred</configuration-dir> + + </service> + + </services> +</metainfo> http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.2/services/ZOOKEEPER/metainfo.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/ZOOKEEPER/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/ZOOKEEPER/metainfo.xml new file mode 100644 index 0000000..525faef --- /dev/null +++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/ZOOKEEPER/metainfo.xml @@ -0,0 +1,40 @@ +<?xml version="1.0"?> +<!-- + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License.
+--> +<metainfo> + <schemaVersion>2.0</schemaVersion> + <services> + <service> + <name>ZOOKEEPER</name> + <displayName>ZooKeeper</displayName> + <comment>Centralized service which provides highly reliable distributed coordination</comment> + <version>3.4.5.2.9.9.9</version> + + <osSpecifics> + <osSpecific> + <osFamily>any</osFamily> + <packages> + <package> + <name>zookeeper_2_9_9_9_98</name> + </package> + </packages> + </osSpecific> + </osSpecifics> + + </service> + </services> +</metainfo> http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/test/python/stacks/1.3.2/HDFS/test_service_check.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/test/python/stacks/1.3.2/HDFS/test_service_check.py b/ambari-server/src/test/python/stacks/1.3.2/HDFS/test_service_check.py index 78cfde7..beed46a 100644 --- a/ambari-server/src/test/python/stacks/1.3.2/HDFS/test_service_check.py +++ b/ambari-server/src/test/python/stacks/1.3.2/HDFS/test_service_check.py @@ -50,18 +50,29 @@ class TestServiceCheck(RMFTestCase): tries = 20, conf_dir = '/etc/hadoop/conf', try_sleep = 3, + bin_dir = '/usr/bin', user = 'ambari-qa', ) - self.assertResourceCalled('ExecuteHadoop', 'fs -mkdir /tmp ; hadoop fs -chmod 777 /tmp', + self.assertResourceCalled('ExecuteHadoop', 'fs -mkdir /tmp', conf_dir = '/etc/hadoop/conf', + bin_dir = '/usr/bin', logoutput = True, - not_if = 'hadoop fs -test -e /tmp', + not_if = 'hadoop --config /etc/hadoop/conf fs -test -e /tmp', try_sleep = 3, tries = 5, user = 'ambari-qa', ) - self.assertResourceCalled('ExecuteHadoop', 'fs -rm /tmp/; hadoop fs -put /etc/passwd /tmp/', + self.assertResourceCalled('ExecuteHadoop', 'fs -chmod 777 /tmp', + conf_dir = '/etc/hadoop/conf', + bin_dir = '/usr/bin', + logoutput = True, + try_sleep = 3, + tries = 5, + user = 'ambari-qa', + ) + self.assertResourceCalled('ExecuteHadoop', 'fs -rm /tmp/; hadoop --config /etc/hadoop/conf fs -put /etc/passwd /tmp/', logoutput = True, + bin_dir = '/usr/bin', tries = 5, conf_dir = '/etc/hadoop/conf', try_sleep = 3, @@ -70,6 +81,7 @@ class TestServiceCheck(RMFTestCase): self.assertResourceCalled('ExecuteHadoop', 'fs -test -e /tmp/', logoutput = True, tries = 5, + bin_dir = '/usr/bin', conf_dir = '/etc/hadoop/conf', try_sleep = 3, user = 'ambari-qa', http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py index c820120..a2261fb 100644 --- a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py +++ b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py @@ -250,6 +250,7 @@ class TestHBaseMaster(RMFTestCase): hdfs_user = 'hdfs', kinit_path_local = "/usr/bin/kinit", owner = 'hbase', + bin_dir = '/usr/bin', action = ['create_delayed'], ) self.assertResourceCalled('HdfsDirectory', '/apps/hbase/staging', @@ -260,6 +261,7 @@ class TestHBaseMaster(RMFTestCase): kinit_path_local = "/usr/bin/kinit", mode = 0711, owner = 'hbase', + bin_dir = '/usr/bin', action = ['create_delayed'], ) self.assertResourceCalled('HdfsDirectory', None, @@ -268,6 +270,7 @@ class TestHBaseMaster(RMFTestCase): conf_dir = '/etc/hadoop/conf', hdfs_user = 'hdfs', kinit_path_local = "/usr/bin/kinit", + bin_dir = '/usr/bin', action = ['create'], ) @@ -350,6 +353,7 @@ 
class TestHBaseMaster(RMFTestCase): hdfs_user = 'hdfs', kinit_path_local = '/usr/bin/kinit', owner = 'hbase', + bin_dir = '/usr/bin', action = ['create_delayed'], ) self.assertResourceCalled('HdfsDirectory', '/apps/hbase/staging', @@ -360,6 +364,7 @@ class TestHBaseMaster(RMFTestCase): kinit_path_local = '/usr/bin/kinit', mode = 0711, owner = 'hbase', + bin_dir = '/usr/bin', action = ['create_delayed'], ) self.assertResourceCalled('HdfsDirectory', None, @@ -368,5 +373,6 @@ class TestHBaseMaster(RMFTestCase): conf_dir = '/etc/hadoop/conf', hdfs_user = 'hdfs', kinit_path_local = '/usr/bin/kinit', + bin_dir = '/usr/bin', action = ['create'], ) http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py index 6a97941..c705fbd 100644 --- a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py +++ b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py @@ -179,6 +179,7 @@ class TestHbaseRegionServer(RMFTestCase): hdfs_user = 'hdfs', kinit_path_local = '/usr/bin/kinit', owner = 'hbase', + bin_dir = '/usr/bin', action = ['create_delayed'], ) self.assertResourceCalled('HdfsDirectory', '/apps/hbase/staging', @@ -189,6 +190,7 @@ class TestHbaseRegionServer(RMFTestCase): kinit_path_local = '/usr/bin/kinit', mode = 0711, owner = 'hbase', + bin_dir = '/usr/bin', action = ['create_delayed'], ) self.assertResourceCalled('HdfsDirectory', None, @@ -197,6 +199,7 @@ class TestHbaseRegionServer(RMFTestCase): conf_dir = '/etc/hadoop/conf', hdfs_user = 'hdfs', kinit_path_local = '/usr/bin/kinit', + bin_dir = '/usr/bin', action = ['create'], ) @@ -279,6 +282,7 @@ class TestHbaseRegionServer(RMFTestCase): hdfs_user = 'hdfs', kinit_path_local = '/usr/bin/kinit', owner = 'hbase', + bin_dir = '/usr/bin', action = ['create_delayed'], ) self.assertResourceCalled('HdfsDirectory', '/apps/hbase/staging', @@ -289,6 +293,7 @@ class TestHbaseRegionServer(RMFTestCase): kinit_path_local = '/usr/bin/kinit', mode = 0711, owner = 'hbase', + bin_dir = '/usr/bin', action = ['create_delayed'], ) self.assertResourceCalled('HdfsDirectory', None, @@ -297,5 +302,6 @@ class TestHbaseRegionServer(RMFTestCase): conf_dir = '/etc/hadoop/conf', hdfs_user = 'hdfs', kinit_path_local = '/usr/bin/kinit', + bin_dir = '/usr/bin', action = ['create'], ) http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_service_check.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_service_check.py b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_service_check.py index 7f9bfa4..7dab7fc 100644 --- a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_service_check.py +++ b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_service_check.py @@ -39,13 +39,13 @@ class TestServiceCheck(RMFTestCase): content = Template('hbase-smoke.sh.j2'), mode = 0755, ) - self.assertResourceCalled('Execute', ' hbase --config /etc/hbase/conf shell /tmp/hbase-smoke.sh', + self.assertResourceCalled('Execute', ' /usr/lib/hbase/bin/hbase --config /etc/hbase/conf shell /tmp/hbase-smoke.sh', logoutput = True, tries = 3, user = 'ambari-qa', try_sleep 
= 5, ) - self.assertResourceCalled('Execute', ' /tmp/hbaseSmokeVerify.sh /etc/hbase/conf ', + self.assertResourceCalled('Execute', ' /tmp/hbaseSmokeVerify.sh /etc/hbase/conf /usr/lib/hbase/bin/hbase', logoutput = True, tries = 3, user = 'ambari-qa', @@ -74,16 +74,16 @@ class TestServiceCheck(RMFTestCase): group = 'hadoop', mode = 0644, ) - self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/hbase.headless.keytab hbase; hbase shell /tmp/hbase_grant_permissions.sh', + self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/hbase.headless.keytab hbase; /usr/lib/hbase/bin/hbase shell /tmp/hbase_grant_permissions.sh', user = 'hbase', ) - self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/smokeuser.headless.keytab ambari-qa; hbase --config /etc/hbase/conf shell /tmp/hbase-smoke.sh', + self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/smokeuser.headless.keytab ambari-qa; /usr/lib/hbase/bin/hbase --config /etc/hbase/conf shell /tmp/hbase-smoke.sh', logoutput = True, tries = 3, user = 'ambari-qa', try_sleep = 5, ) - self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/smokeuser.headless.keytab ambari-qa; /tmp/hbaseSmokeVerify.sh /etc/hbase/conf ', + self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/smokeuser.headless.keytab ambari-qa; /tmp/hbaseSmokeVerify.sh /etc/hbase/conf /usr/lib/hbase/bin/hbase', logoutput = True, tries = 3, user = 'ambari-qa', http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py index 5e38f66..c7d2601 100644 --- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py +++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py @@ -48,7 +48,7 @@ class TestNamenode(RMFTestCase): content = StaticFile('checkForFormat.sh'), mode = 0755, ) - self.assertResourceCalled('Execute', '/tmp/checkForFormat.sh hdfs /etc/hadoop/conf /var/run/hadoop/hdfs/namenode/formatted/ /var/lib/hdfs/namenode/formatted/ /hadoop/hdfs/namenode', + self.assertResourceCalled('Execute', '/tmp/checkForFormat.sh hdfs /etc/hadoop/conf /usr/bin /var/run/hadoop/hdfs/namenode/formatted/ /var/lib/hdfs/namenode/formatted/ /hadoop/hdfs/namenode', path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'], not_if = 'test -d /var/run/hadoop/hdfs/namenode/formatted/ || test -d /var/lib/hdfs/namenode/formatted/', ) @@ -75,7 +75,7 @@ class TestNamenode(RMFTestCase): self.assertResourceCalled('Execute', 'ulimit -c unlimited; su - hdfs -c \'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start namenode\'', not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid` >/dev/null 2>&1', ) - self.assertResourceCalled('Execute', "su - hdfs -c 'hadoop dfsadmin -safemode get' | grep 'Safe mode is OFF'", + self.assertResourceCalled('Execute', "su - hdfs -c 'export PATH=$PATH:/usr/bin ; hadoop --config /etc/hadoop/conf dfsadmin -safemode get' | grep 'Safe mode is OFF'", tries = 40, only_if = None, try_sleep = 10, @@ -88,6 +88,7 @@ class TestNamenode(RMFTestCase): kinit_path_local = '/usr/bin/kinit', mode = 0777, owner = 'hdfs', + bin_dir 
= '/usr/bin', action = ['create_delayed'], ) self.assertResourceCalled('HdfsDirectory', '/user/ambari-qa', @@ -98,6 +99,7 @@ class TestNamenode(RMFTestCase): kinit_path_local = '/usr/bin/kinit', mode = 0770, owner = 'ambari-qa', + bin_dir = '/usr/bin', action = ['create_delayed'], ) self.assertResourceCalled('HdfsDirectory', None, @@ -107,6 +109,7 @@ class TestNamenode(RMFTestCase): hdfs_user = 'hdfs', kinit_path_local = '/usr/bin/kinit', action = ['create'], + bin_dir = '/usr/bin', only_if = None, ) self.assertNoMoreResources() @@ -149,7 +152,7 @@ class TestNamenode(RMFTestCase): content = StaticFile('checkForFormat.sh'), mode = 0755, ) - self.assertResourceCalled('Execute', '/tmp/checkForFormat.sh hdfs /etc/hadoop/conf /var/run/hadoop/hdfs/namenode/formatted/ /var/lib/hdfs/namenode/formatted/ /hadoop/hdfs/namenode', + self.assertResourceCalled('Execute', '/tmp/checkForFormat.sh hdfs /etc/hadoop/conf /usr/bin /var/run/hadoop/hdfs/namenode/formatted/ /var/lib/hdfs/namenode/formatted/ /hadoop/hdfs/namenode', path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'], not_if = 'test -d /var/run/hadoop/hdfs/namenode/formatted/ || test -d /var/lib/hdfs/namenode/formatted/', ) @@ -179,7 +182,7 @@ class TestNamenode(RMFTestCase): self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/hdfs.headless.keytab hdfs', user = 'hdfs', ) - self.assertResourceCalled('Execute', "su - hdfs -c 'hadoop dfsadmin -safemode get' | grep 'Safe mode is OFF'", + self.assertResourceCalled('Execute', "su - hdfs -c 'export PATH=$PATH:/usr/bin ; hadoop --config /etc/hadoop/conf dfsadmin -safemode get' | grep 'Safe mode is OFF'", tries = 40, only_if = None, try_sleep = 10, @@ -192,6 +195,7 @@ class TestNamenode(RMFTestCase): kinit_path_local = '/usr/bin/kinit', mode = 0777, owner = 'hdfs', + bin_dir = '/usr/bin', action = ['create_delayed'], ) self.assertResourceCalled('HdfsDirectory', '/user/ambari-qa', @@ -202,6 +206,7 @@ class TestNamenode(RMFTestCase): kinit_path_local = '/usr/bin/kinit', mode = 0770, owner = 'ambari-qa', + bin_dir = '/usr/bin', action = ['create_delayed'], ) self.assertResourceCalled('HdfsDirectory', None, @@ -211,6 +216,7 @@ class TestNamenode(RMFTestCase): hdfs_user = 'hdfs', kinit_path_local = '/usr/bin/kinit', action = ['create'], + bin_dir = '/usr/bin', only_if = None, ) self.assertNoMoreResources() @@ -260,9 +266,9 @@ class TestNamenode(RMFTestCase): self.assertResourceCalled('Execute', 'ulimit -c unlimited; su - hdfs -c \'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start namenode\'', not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid` >/dev/null 2>&1', ) - self.assertResourceCalled('Execute', "su - hdfs -c 'hadoop dfsadmin -safemode get' | grep 'Safe mode is OFF'", + self.assertResourceCalled('Execute', "su - hdfs -c 'export PATH=$PATH:/usr/bin ; hadoop --config /etc/hadoop/conf dfsadmin -safemode get' | grep 'Safe mode is OFF'", tries = 40, - only_if = "su - hdfs -c 'hdfs haadmin -getServiceState nn1 | grep active > /dev/null'", + only_if = "su - hdfs -c 'export PATH=$PATH:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn1 | grep active > /dev/null'", try_sleep = 10, ) self.assertResourceCalled('HdfsDirectory', '/tmp', @@ -273,6 +279,7 @@ class TestNamenode(RMFTestCase): kinit_path_local = '/usr/bin/kinit', mode = 0777, owner = 'hdfs', + bin_dir = '/usr/bin', action = ['create_delayed'], ) 
self.assertResourceCalled('HdfsDirectory', '/user/ambari-qa', @@ -283,6 +290,7 @@ class TestNamenode(RMFTestCase): kinit_path_local = '/usr/bin/kinit', mode = 0770, owner = 'ambari-qa', + bin_dir = '/usr/bin', action = ['create_delayed'], ) self.assertResourceCalled('HdfsDirectory', None, @@ -292,7 +300,8 @@ class TestNamenode(RMFTestCase): hdfs_user = 'hdfs', kinit_path_local = '/usr/bin/kinit', action = ['create'], - only_if = "su - hdfs -c 'hdfs haadmin -getServiceState nn1 | grep active > /dev/null'", + bin_dir = '/usr/bin', + only_if = "su - hdfs -c 'export PATH=$PATH:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn1 | grep active > /dev/null'", ) self.assertNoMoreResources() @@ -326,9 +335,9 @@ class TestNamenode(RMFTestCase): self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/hdfs.headless.keytab hdfs', user = 'hdfs', ) - self.assertResourceCalled('Execute', "su - hdfs -c 'hadoop dfsadmin -safemode get' | grep 'Safe mode is OFF'", + self.assertResourceCalled('Execute', "su - hdfs -c 'export PATH=$PATH:/usr/bin ; hadoop --config /etc/hadoop/conf dfsadmin -safemode get' | grep 'Safe mode is OFF'", tries = 40, - only_if = "su - hdfs -c 'hdfs haadmin -getServiceState nn1 | grep active > /dev/null'", + only_if = "su - hdfs -c 'export PATH=$PATH:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn1 | grep active > /dev/null'", try_sleep = 10, ) self.assertResourceCalled('HdfsDirectory', '/tmp', @@ -339,6 +348,7 @@ class TestNamenode(RMFTestCase): kinit_path_local = '/usr/bin/kinit', mode = 0777, owner = 'hdfs', + bin_dir = '/usr/bin', action = ['create_delayed'], ) self.assertResourceCalled('HdfsDirectory', '/user/ambari-qa', @@ -349,6 +359,7 @@ class TestNamenode(RMFTestCase): kinit_path_local = '/usr/bin/kinit', mode = 0770, owner = 'ambari-qa', + bin_dir = '/usr/bin', action = ['create_delayed'], ) self.assertResourceCalled('HdfsDirectory', None, @@ -358,7 +369,8 @@ class TestNamenode(RMFTestCase): hdfs_user = 'hdfs', kinit_path_local = '/usr/bin/kinit', action = ['create'], - only_if = "su - hdfs -c 'hdfs haadmin -getServiceState nn1 | grep active > /dev/null'", + bin_dir = '/usr/bin', + only_if = "su - hdfs -c 'export PATH=$PATH:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn1 | grep active > /dev/null'", ) self.assertNoMoreResources() @@ -377,6 +389,7 @@ class TestNamenode(RMFTestCase): self.assertResourceCalled('ExecuteHadoop', 'dfsadmin -refreshNodes', user = 'hdfs', conf_dir = '/etc/hadoop/conf', + bin_dir = '/usr/bin', kinit_override = True) self.assertNoMoreResources() @@ -394,7 +407,8 @@ class TestNamenode(RMFTestCase): self.assertResourceCalled('Execute', '', user = 'hdfs') self.assertResourceCalled('ExecuteHadoop', 'dfsadmin -fs hdfs://c6401.ambari.apache.org:8020 -refreshNodes', user = 'hdfs', - conf_dir = '/etc/hadoop/conf', + conf_dir = '/etc/hadoop/conf', + bin_dir = '/usr/bin', kinit_override = True) self.assertNoMoreResources() http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_service_check.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_service_check.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_service_check.py index 78cfde7..57abab3 100644 --- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_service_check.py +++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_service_check.py @@ -50,20 +50,31 @@ class 
TestServiceCheck(RMFTestCase): tries = 20, conf_dir = '/etc/hadoop/conf', try_sleep = 3, + bin_dir = '/usr/bin', user = 'ambari-qa', ) - self.assertResourceCalled('ExecuteHadoop', 'fs -mkdir /tmp ; hadoop fs -chmod 777 /tmp', + self.assertResourceCalled('ExecuteHadoop', 'fs -mkdir /tmp', conf_dir = '/etc/hadoop/conf', logoutput = True, - not_if = 'hadoop fs -test -e /tmp', + not_if = 'hadoop --config /etc/hadoop/conf fs -test -e /tmp', try_sleep = 3, tries = 5, + bin_dir = '/usr/bin', user = 'ambari-qa', ) - self.assertResourceCalled('ExecuteHadoop', 'fs -rm /tmp/; hadoop fs -put /etc/passwd /tmp/', + self.assertResourceCalled('ExecuteHadoop', 'fs -chmod 777 /tmp', + conf_dir = '/etc/hadoop/conf', + logoutput = True, + try_sleep = 3, + tries = 5, + bin_dir = '/usr/bin', + user = 'ambari-qa', + ) + self.assertResourceCalled('ExecuteHadoop', 'fs -rm /tmp/; hadoop --config /etc/hadoop/conf fs -put /etc/passwd /tmp/', logoutput = True, tries = 5, conf_dir = '/etc/hadoop/conf', + bin_dir = '/usr/bin', try_sleep = 3, user = 'ambari-qa', ) @@ -71,6 +82,7 @@ class TestServiceCheck(RMFTestCase): logoutput = True, tries = 5, conf_dir = '/etc/hadoop/conf', + bin_dir = '/usr/bin', try_sleep = 3, user = 'ambari-qa', )
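
Taken together, the updated test expectations above all assert the same pattern: every Hadoop invocation now carries an explicit bin_dir and a "--config <conf_dir>" flag, so the commands keep working when the binaries and config live under a versioned /usr/hdp/... install prefix instead of the /usr/bin and /etc/hadoop/conf defaults. As a minimal, self-contained sketch (not Ambari's actual implementation; the helper name is hypothetical), the safemode-check string asserted in TestNamenode can be composed like this:

# Minimal sketch (not Ambari's actual code): compose the safemode-check
# command asserted in TestNamenode above from an explicit bin directory
# and Hadoop config directory. The helper name is hypothetical.
def build_safemode_check_cmd(hadoop_bin_dir, hadoop_conf_dir, hdfs_user='hdfs'):
  # Inner shell snippet run as the hdfs user: put the Hadoop bin dir on
  # PATH, then query the safemode state against the given config directory.
  inner = ('export PATH=$PATH:{bin} ; '
           'hadoop --config {conf} dfsadmin -safemode get').format(
      bin=hadoop_bin_dir, conf=hadoop_conf_dir)
  # Wrap in su and grep for the expected "Safe mode is OFF" marker.
  return "su - {user} -c '{inner}' | grep 'Safe mode is OFF'".format(
      user=hdfs_user, inner=inner)

# Reproduces the exact string the 2.0.6 HDFS namenode test expects:
# su - hdfs -c 'export PATH=$PATH:/usr/bin ; hadoop --config /etc/hadoop/conf dfsadmin -safemode get' | grep 'Safe mode is OFF'
print(build_safemode_check_cmd('/usr/bin', '/etc/hadoop/conf'))

With a versioned install, the same helper would simply receive the /usr/hdp/... bin and conf directories in place of the defaults shown here.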