AMBARI-8433 Enable HDP 2.2.GlusterFS stack

Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/c15cd0e0
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/c15cd0e0
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/c15cd0e0

Branch: refs/heads/trunk
Commit: c15cd0e093c2fe3303c36972a5c943e4f00b5ee8
Parents: aa696e0
Author: Scott Creeley <scree...@apache.org>
Authored: Thu Dec 11 13:49:16 2014 -0500
Committer: Scott Creeley <scree...@apache.org>
Committed: Thu Dec 11 13:49:50 2014 -0500

----------------------------------------------------------------------
 .../2.0.6/hooks/after-INSTALL/scripts/params.py |    1 +
 .../scripts/shared_initialization.py            |    3 +-
 .../2.0.6/hooks/before-ANY/scripts/params.py    |    1 +
 .../before-ANY/scripts/shared_initialization.py |   14 +-
 .../2.2.GlusterFS/configuration/cluster-env.xml |   56 +
 .../stacks/HDP/2.2.GlusterFS/metainfo.xml       |   23 +
 .../stacks/HDP/2.2.GlusterFS/repos/repoinfo.xml |   68 +
 .../HDP/2.2.GlusterFS/role_command_order.json   |   79 +
 .../2.2.GlusterFS/services/FLUME/metainfo.xml   |   47 +
 .../GLUSTERFS/configuration/core-site.xml       |   41 +
 .../GLUSTERFS/configuration/hadoop-env.xml      |  194 ++
 .../services/GLUSTERFS/metainfo.xml             |   74 +
 .../GLUSTERFS/package/scripts/glusterfs.py      |   29 +
 .../package/scripts/glusterfs_client.py         |   34 +
 .../GLUSTERFS/package/scripts/params.py         |   29 +
 .../GLUSTERFS/package/scripts/service_check.py  |   37 +
 .../package/templates/glusterfs-env.sh.j2       |   38 +
 .../package/templates/glusterfs.properties.j2   |   36 +
 .../services/HBASE/configuration/hbase-site.xml |   65 +
 .../2.2.GlusterFS/services/HBASE/metainfo.xml   |   47 +
 .../services/HIVE/configuration/hive-env.xml    |   64 +
 .../services/HIVE/configuration/hive-site.xml   | 1119 ++++++++
 .../HIVE/configuration/webhcat-site.xml         |  112 +
 .../2.2.GlusterFS/services/HIVE/metainfo.xml    |   88 +
 .../2.2.GlusterFS/services/KAFKA/alerts.json    |   26 +
 .../KAFKA/configuration/kafka-broker.xml        |  321 +++
 .../services/KAFKA/configuration/kafka-env.xml  |   55 +
 .../KAFKA/configuration/kafka-log4j.xml         |  116 +
 .../2.2.GlusterFS/services/KAFKA/metainfo.xml   |   82 +
 .../2.2.GlusterFS/services/KAFKA/metrics.json   |  262 ++
 .../services/KAFKA/package/scripts/kafka.py     |   70 +
 .../KAFKA/package/scripts/kafka_broker.py       |   63 +
 .../services/KAFKA/package/scripts/params.py    |   58 +
 .../KAFKA/package/scripts/properties_config.py  |   32 +
 .../KAFKA/package/scripts/service_check.py      |   65 +
 .../KAFKA/package/scripts/status_params.py      |   26 +
 .../KERBEROS/configuration/kadm5-acl.xml        |   36 +
 .../KERBEROS/configuration/kdc-conf.xml         |   57 +
 .../KERBEROS/configuration/krb5-conf.xml        |  186 ++
 .../services/KERBEROS/metainfo.xml              |  167 ++
 .../KERBEROS/package/scripts/kerberos_client.py |   40 +
 .../KERBEROS/package/scripts/kerberos_common.py |  398 +++
 .../KERBEROS/package/scripts/kerberos_server.py |  144 +
 .../services/KERBEROS/package/scripts/params.py |  211 ++
 .../KERBEROS/package/scripts/service_check.py   |   63 +
 .../services/KERBEROS/package/scripts/utils.py  |   69 +
 .../KERBEROS/package/templates/kadm5_acl.j2     |   20 +
 .../KERBEROS/package/templates/kdc_conf.j2      |   30 +
 .../KERBEROS/package/templates/krb5_conf.j2     |   47 +
 .../HDP/2.2.GlusterFS/services/KNOX/alerts.json |   26 +
 .../KNOX/configuration/gateway-log4j.xml        |   83 +
 .../KNOX/configuration/gateway-site.xml         |   72 +
 .../services/KNOX/configuration/knox-env.xml    |   53 +
 .../services/KNOX/configuration/ldap-log4j.xml  |   66 +
 .../services/KNOX/configuration/topology.xml    |  116 +
 .../services/KNOX/configuration/users-ldif.xml  |  135 +
 .../2.2.GlusterFS/services/KNOX/metainfo.xml    |   87 +
 .../KNOX/package/files/validateKnoxStatus.py    |   43 +
 .../services/KNOX/package/scripts/knox.py       |   74 +
 .../KNOX/package/scripts/knox_gateway.py        |  101 +
 .../services/KNOX/package/scripts/ldap.py       |   39 +
 .../services/KNOX/package/scripts/params.py     |  140 +
 .../KNOX/package/scripts/service_check.py       |   58 +
 .../KNOX/package/scripts/status_params.py       |   27 +
 .../package/templates/krb5JAASLogin.conf.j2     |   30 +
 .../services/OOZIE/configuration/oozie-env.xml  |  101 +
 .../services/OOZIE/configuration/oozie-site.xml |  107 +
 .../2.2.GlusterFS/services/OOZIE/metainfo.xml   |   70 +
 .../HDP/2.2.GlusterFS/services/PIG/metainfo.xml |   48 +
 .../SLIDER/configuration/slider-client.xml      |   60 +
 .../SLIDER/configuration/slider-env.xml         |   43 +
 .../SLIDER/configuration/slider-log4j.xml       |   89 +
 .../2.2.GlusterFS/services/SLIDER/metainfo.xml  |  132 +
 .../SLIDER/package/files/hbaseSmokeVerify.sh    |   34 +
 .../services/SLIDER/package/scripts/__init__.py |   19 +
 .../services/SLIDER/package/scripts/params.py   |   49 +
 .../SLIDER/package/scripts/service_check.py     |   45 +
 .../services/SLIDER/package/scripts/slider.py   |   62 +
 .../SLIDER/package/scripts/slider_client.py     |   43 +
 .../package/templates/storm-slider-env.sh.j2    |   38 +
 .../2.2.GlusterFS/services/SQOOP/metainfo.xml   |   52 +
 .../services/TEZ/configuration/tez-site.xml     |  311 +++
 .../HDP/2.2.GlusterFS/services/TEZ/metainfo.xml |   47 +
 .../YARN/configuration-mapred/mapred-env.xml    |   46 +
 .../YARN/configuration-mapred/mapred-site.xml   |  173 ++
 .../YARN/configuration/capacity-scheduler.xml   |  131 +
 .../YARN/configuration/mapred-site.xml.2        |   68 +
 .../services/YARN/configuration/yarn-site.xml   |  565 ++++
 .../2.2.GlusterFS/services/YARN/metainfo.xml    |   83 +
 .../2.2.GlusterFS/services/YARN/metrics.json    | 2534 ++++++++++++++++++
 .../services/ZOOKEEPER/metainfo.xml             |   46 +
 .../HDP/2.2.GlusterFS/upgrades/upgrade-2.2.xml  |   40 +
 92 files changed, 11125 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/c15cd0e0/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py
index 410198b..64c7979 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py
@@ -24,6 +24,7 @@ from resource_management.core.system import System
 config = Script.get_config()
 
 hdp_stack_version = str(config['hostLevelParams']['stack_version'])
+hdp_full_stack_version = hdp_stack_version
 hdp_stack_version = format_hdp_stack_version(hdp_stack_version)
 
 #hadoop params

http://git-wip-us.apache.org/repos/asf/ambari/blob/c15cd0e0/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/shared_initialization.py
index 9d1e343..acd45f8 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/shared_initialization.py
@@ -28,7 +28,8 @@ def setup_hdp_install_directory():
 
 def setup_config():
   import params
-  if params.has_namenode:
+  stackversion = params.hdp_full_stack_version
+  if params.has_namenode or stackversion.find('Gluster') >= 0:
     XmlConfig("core-site.xml",
               conf_dir=params.hadoop_conf_dir,
               configurations=params.config['configurations']['core-site'],
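
A note on the check above (illustration only, not part of the diff): params.py keeps the raw stack_version string in hdp_full_stack_version because format_hdp_stack_version presumably normalizes "2.2.GlusterFS" to a numeric form, so the "Gluster" marker survives only in the raw copy. A minimal Python sketch of the detection, with assumed example inputs:

def is_glusterfs_stack(full_stack_version):
    # True when the unformatted stack_version names a GlusterFS-flavored stack.
    return full_stack_version.find('Gluster') >= 0

# Assumed example values, for illustration only:
assert is_glusterfs_stack('2.2.GlusterFS')   # core-site.xml is written even without a NameNode
assert not is_glusterfs_stack('2.2')         # plain HDP stack keeps the has_namenode gate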

http://git-wip-us.apache.org/repos/asf/ambari/blob/c15cd0e0/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/params.py
index c97aa16..e726285 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/params.py
@@ -34,6 +34,7 @@ java_home = config['hostLevelParams']['java_home']
 ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
 
 hdp_stack_version = str(config['hostLevelParams']['stack_version'])
+hdp_full_stack_version = hdp_stack_version
 security_enabled = config['configurations']['cluster-env']['security_enabled']
 hdp_stack_version = format_hdp_stack_version(hdp_stack_version)
 hdfs_user = config['configurations']['hadoop-env']['hdfs_user']

http://git-wip-us.apache.org/repos/asf/ambari/blob/c15cd0e0/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py
index 51aab57..15bae9d 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py
@@ -99,7 +99,8 @@ def set_uid(user, user_dirs):
     
 def setup_hadoop_env():
   import params
-  if params.has_namenode:
+  stackversion = params.hdp_full_stack_version
+  if params.has_namenode or stackversion.find('Gluster') >= 0:
     if params.security_enabled:
       tc_owner = "root"
     else:
@@ -115,11 +116,18 @@ def setup_hadoop_env():
               owner=params.hdfs_user,
               group=params.user_group
     )
-    Directory(params.hadoop_conf_empty_dir,
+    if stackversion.find('Gluster') >= 0:
+        Directory(params.hadoop_conf_empty_dir,
+              recursive=True,
+              owner="root",
+              group=params.user_group
+        )
+    else:
+        Directory(params.hadoop_conf_empty_dir,
               recursive=True,
               owner=tc_owner,
               group=params.user_group
-    )
+        )
     Link(params.hadoop_conf_dir,
          to=params.hadoop_conf_empty_dir,
          not_if=format("ls {hadoop_conf_dir}")
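
The two Directory blocks introduced above differ only in the owner of hadoop_conf_empty_dir: root on a GlusterFS stack, tc_owner otherwise. A small, self-contained sketch of that ownership decision (plain Python, illustration only, not the resource_management DSL used in the hook):

def conf_dir_owner(full_stack_version, tc_owner):
    # root owns the empty hadoop conf dir on GlusterFS stacks; tc_owner otherwise.
    return "root" if full_stack_version.find('Gluster') >= 0 else tc_owner

# Assumed example values, for illustration only:
assert conf_dir_owner('2.2.GlusterFS', 'hdfs') == 'root'
assert conf_dir_owner('2.2', 'hdfs') == 'hdfs'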

http://git-wip-us.apache.org/repos/asf/ambari/blob/c15cd0e0/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/configuration/cluster-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/configuration/cluster-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/configuration/cluster-env.xml
new file mode 100644
index 0000000..d41ff98
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/configuration/cluster-env.xml
@@ -0,0 +1,56 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+    <property>
+        <name>security_enabled</name>
+        <value>false</value>
+        <description>Hadoop Security</description>
+    </property>
+    <property>
+        <name>kerberos_domain</name>
+        <value>EXAMPLE.COM</value>
+        <description>Kerberos realm.</description>
+    </property>
+    <property>
+        <name>ignore_groupsusers_create</name>
+        <value>false</value>
+        <description>Whether to ignore failures on users and group creation</description>
+    </property>
+    <property>
+        <name>smokeuser</name>
+        <value>ambari-qa</value>
+        <property-type>USER</property-type>
+        <description>User executing service checks</description>
+    </property>
+    <property>
+        <name>smokeuser_keytab</name>
+        <value>/etc/security/keytabs/smokeuser.headless.keytab</value>
+        <description>Path to smoke test user keytab file</description>
+    </property>
+    <property>
+        <name>user_group</name>
+        <value>hadoop</value>
+        <property-type>GROUP</property-type>
+        <description>Hadoop user group.</description>
+    </property>
+</configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/c15cd0e0/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/metainfo.xml
new file mode 100644
index 0000000..39ec8e7
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/metainfo.xml
@@ -0,0 +1,23 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+    <versions>
+         <active>false</active>
+    </versions>
+    <extends>2.1</extends>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/c15cd0e0/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/repos/repoinfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/repos/repoinfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/repos/repoinfo.xml
new file mode 100644
index 0000000..7acb8b9
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/repos/repoinfo.xml
@@ -0,0 +1,68 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<reposinfo>
+  <latest>http://s3.amazonaws.com/dev.hortonworks.com/HDP/hdp_urlinfo.json</latest>
+  <os family="redhat6">
+    <repo>
+      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.2.0.0</baseurl>
+      <repoid>HDP-2.2</repoid>
+      <reponame>HDP</reponame>
+    </repo>
+    <repo>
+      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos6</baseurl>
+      <repoid>HDP-UTILS-1.1.0.20</repoid>
+      <reponame>HDP-UTILS</reponame>
+    </repo>
+  </os>
+  <os family="redhat5">
+    <repo>
+      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos5/2.x/updates/2.2.0.0</baseurl>
+      <repoid>HDP-2.2</repoid>
+      <reponame>HDP</reponame>
+    </repo>
+    <repo>
+      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos5</baseurl>
+      <repoid>HDP-UTILS-1.1.0.20</repoid>
+      <reponame>HDP-UTILS</reponame>
+    </repo>
+  </os>
+  <os family="suse11">
+    <repo>
+      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/sles11sp1/2.x/updates/2.2.0.0</baseurl>
+      <repoid>HDP-2.2</repoid>
+      <reponame>HDP</reponame>
+    </repo>
+    <repo>
+      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.20/repos/suse11sp3</baseurl>
+      <repoid>HDP-UTILS-1.1.0.20</repoid>
+      <reponame>HDP-UTILS</reponame>
+    </repo>
+  </os>
+  <os family="ubuntu12">
+    <repo>
+      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/ubuntu12/2.x/BUILDS/2.2.0.0-962</baseurl>
+      <repoid>HDP-2.2</repoid>
+      <reponame>HDP</reponame>
+    </repo>
+    <repo>
+      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.20/repos/ubuntu12</baseurl>
+      <repoid>HDP-UTILS-1.1.0.20</repoid>
+      <reponame>HDP-UTILS</reponame>
+    </repo>
+  </os>
+</reposinfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/c15cd0e0/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/role_command_order.json b/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/role_command_order.json
new file mode 100644
index 0000000..72b49fa
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/role_command_order.json
@@ -0,0 +1,79 @@
+{
+  "_comment" : "Record format:",
+  "_comment" : "blockedRole-blockedCommand: [blockerRole1-blockerCommand1, blockerRole2-blockerCommand2, ...]",
+  "general_deps" : {
+    "_comment" : "dependencies for all cases",
+    "NIMBUS-START" : ["ZOOKEEPER_SERVER-START", "NODEMANAGER-START", "RESOURCEMANAGER-START"],
+    "SUPERVISOR-START" : ["NIMBUS-START"],
+    "STORM_UI_SERVER-START" : ["NIMBUS-START"],
+    "DRPC_SERVER-START" : ["NIMBUS-START"],
+    "STORM_REST_API-START" : ["NIMBUS-START", "STORM_UI_SERVER-START", "SUPERVISOR-START", "DRPC_SERVER-START"],
+    "HBASE_MASTER-START": ["ZOOKEEPER_SERVER-START"],
+    "HBASE_REGIONSERVER-START": ["HBASE_MASTER-START"],
+    "OOZIE_SERVER-START": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
+    "WEBHCAT_SERVER-START": ["NODEMANAGER-START", "HIVE_SERVER-START"],
+    "HIVE_METASTORE-START": ["MYSQL_SERVER-START"],
+    "HIVE_SERVER-START": ["NODEMANAGER-START", "MYSQL_SERVER-START"],
+    "HUE_SERVER-START": ["HIVE_SERVER-START", "HCAT-START", "OOZIE_SERVER-START"],
+    "FLUME_HANDLER-START": ["OOZIE_SERVER-START"],
+    "FALCON_SERVER-START": ["NAMENODE-START", "DATANODE-START", "OOZIE_SERVER-START"],
+    "MAPREDUCE_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
+    "OOZIE_SERVICE_CHECK-SERVICE_CHECK": ["OOZIE_SERVER-START"],
+    "WEBHCAT_SERVICE_CHECK-SERVICE_CHECK": ["WEBHCAT_SERVER-START"],
+    "HBASE_SERVICE_CHECK-SERVICE_CHECK": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START"],
+    "HIVE_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START", "HIVE_METASTORE-START", "WEBHCAT_SERVER-START"],
+    "HCAT_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START"],
+    "PIG_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
+    "SQOOP_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
+    "ZOOKEEPER_SERVICE_CHECK-SERVICE_CHECK": ["ZOOKEEPER_SERVER-START"],
+    "ZOOKEEPER_QUORUM_SERVICE_CHECK-SERVICE_CHECK": ["ZOOKEEPER_SERVER-START"],
+    "STORM_SERVICE_CHECK-SERVICE_CHECK": ["NIMBUS-START", "SUPERVISOR-START", "STORM_UI_SERVER-START",
+        "DRPC_SERVER-START"],
+    "FLUME_SERVICE_CHECK-SERVICE_CHECK": ["FLUME_HANDLER-START"],
+    "FALCON_SERVICE_CHECK-SERVICE_CHECK": ["FALCON_SERVER-START"],
+    "SLIDER_SERVICE_CHECK-SERVICE_CHECK" : ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
+    "KAFKA_BROKER-START" : ["ZOOKEEPER_SERVER-START"],
+    "KAFKA_SERVICE_CHECK-SERVICE_CHECK": ["KAFKA_BROKER-START"],
+    "ZOOKEEPER_SERVER-STOP" : ["HBASE_MASTER-STOP", "HBASE_REGIONSERVER-STOP"],
+    "HBASE_MASTER-STOP": ["HBASE_REGIONSERVER-STOP"],
+    "NIMBUS-STOP" : ["SUPERVISOR-STOP", "STORM_UI_SERVER-STOP", "DRPC_SERVER-STOP"]
+  },
+  "_comment" : "GLUSTERFS-specific dependencies",
+  "optional_glusterfs": {
+    "HBASE_MASTER-START": ["PEERSTATUS-START"],
+    "GLUSTERFS_SERVICE_CHECK-SERVICE_CHECK": ["PEERSTATUS-START"]
+  },
+  "_comment" : "Dependencies that are used when GLUSTERFS is not present in cluster",
+  "optional_no_glusterfs": {
+    "SECONDARY_NAMENODE-START": ["NAMENODE-START"],
+    "RESOURCEMANAGER-START": ["NAMENODE-START", "DATANODE-START"],
+    "NODEMANAGER-START": ["NAMENODE-START", "DATANODE-START", "RESOURCEMANAGER-START"],
+    "HISTORYSERVER-START": ["NAMENODE-START", "DATANODE-START"],
+    "HBASE_MASTER-START": ["NAMENODE-START", "DATANODE-START"],
+    "APP_TIMELINE_SERVER-START": ["NAMENODE-START", "DATANODE-START"],
+    "FALCON_SERVER-START": ["NAMENODE-START", "DATANODE-START"],
+    "FALCON_SERVICE_CHECK-SERVICE_CHECK": ["FALCON_SERVER-START"],
+    "HIVE_SERVER-START": ["DATANODE-START"],
+    "WEBHCAT_SERVER-START": ["DATANODE-START"],
+    "HDFS_SERVICE_CHECK-SERVICE_CHECK": ["NAMENODE-START", "DATANODE-START",
+        "SECONDARY_NAMENODE-START"],
+    "MAPREDUCE2_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START",
+        "RESOURCEMANAGER-START", "HISTORYSERVER-START", "YARN_SERVICE_CHECK-SERVICE_CHECK"],
+    "YARN_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
+    "RESOURCEMANAGER_SERVICE_CHECK-SERVICE_CHECK": ["RESOURCEMANAGER-START"],
+    "PIG_SERVICE_CHECK-SERVICE_CHECK": ["RESOURCEMANAGER-START", "NODEMANAGER-START"],
+    "NAMENODE-STOP": ["RESOURCEMANAGER-STOP", "NODEMANAGER-STOP",
+        "HISTORYSERVER-STOP", "HBASE_MASTER-STOP", "FALCON_SERVER-STOP"],
+    "DATANODE-STOP": ["RESOURCEMANAGER-STOP", "NODEMANAGER-STOP",
+        "HISTORYSERVER-STOP", "HBASE_MASTER-STOP", "FALCON_SERVER-STOP"]
+  },
+  "_comment" : "Dependencies that are used in HA NameNode cluster",
+  "namenode_optional_ha": {
+    "NAMENODE-START": ["ZKFC-START", "JOURNALNODE-START", "ZOOKEEPER_SERVER-START"],
+    "ZKFC-START": ["ZOOKEEPER_SERVER-START"]
+  },
+  "_comment" : "Dependencies that are used in ResourceManager HA cluster",
+  "resourcemanager_optional_ha" : {
+    "RESOURCEMANAGER-START": ["ZOOKEEPER_SERVER-START"]
+  }
+}
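
Each key in role_command_order.json is a blocked ROLE-COMMAND whose value lists the ROLE-COMMANDs that must complete first; the optional_glusterfs and optional_no_glusterfs sections apply depending on whether GLUSTERFS is installed. A short sketch of reading the file (illustration only, not Ambari's own loader; the local file path is assumed):

import json

with open('role_command_order.json') as f:
    order = json.load(f)

# ZooKeeper must be started before the Kafka broker:
print(order['general_deps']['KAFKA_BROKER-START'])   # ['ZOOKEEPER_SERVER-START']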

http://git-wip-us.apache.org/repos/asf/ambari/blob/c15cd0e0/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/FLUME/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/FLUME/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/FLUME/metainfo.xml
new file mode 100644
index 0000000..c5b38b3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/FLUME/metainfo.xml
@@ -0,0 +1,47 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>FLUME</name>
+      <displayName>Flume</displayName>
+      <version>1.5.2.2.2.0.0</version>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>redhat5,redhat6,suse11</osFamily>
+          <packages>
+            <package>
+              <name>flume_2_2_*</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>ubuntu12</osFamily>
+          <packages>
+            <package>
+              <name>flume-2-2-.*</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/c15cd0e0/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/GLUSTERFS/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/GLUSTERFS/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/GLUSTERFS/configuration/core-site.xml
new file mode 100644
index 0000000..7794ac8
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/GLUSTERFS/configuration/core-site.xml
@@ -0,0 +1,41 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+ <!--
+    Licensed to the Apache Software Foundation (ASF) under one or more
+    contributor license agreements.  See the NOTICE file distributed with
+    this work for additional information regarding copyright ownership.
+    The ASF licenses this file to You under the Apache License, Version 2.0
+    (the "License"); you may not use this file except in compliance with
+    the License.  You may obtain a copy of the License at
+ 
+        http://www.apache.org/licenses/LICENSE-2.0
+ 
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+ -->
+ 
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration supports_final="true" xmlns:xi="http://www.w3.org/2001/XInclude">
+
+<!-- file system properties -->
+
+  <property>
+  <name>fs.AbstractFileSystem.glusterfs.impl</name>
+  <value>org.apache.hadoop.fs.local.GlusterFs</value>
+  </property>
+
+  <property>
+  <name>fs.glusterfs.impl</name>
+  <value>org.apache.hadoop.fs.glusterfs.GlusterFileSystem</value>
+  </property>
+
+  <property>
+    <name>fs.defaultFS</name>
+    <value>glusterfs:///localhost:8020</value>
+  </property>  
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/c15cd0e0/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/GLUSTERFS/configuration/hadoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/GLUSTERFS/configuration/hadoop-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/GLUSTERFS/configuration/hadoop-env.xml
new file mode 100644
index 0000000..bce6b53
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/GLUSTERFS/configuration/hadoop-env.xml
@@ -0,0 +1,194 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false">
+  <property>
+    <name>hadoop_pid_dir_prefix</name>
+    <value>/var/run/hadoop</value>
+    <description>Hadoop PID Dir Prefix</description>
+  </property>
+ <property>
+    <name>hadoop_heapsize</name>
+    <value>1024</value>
+    <description>Hadoop maximum Java heap size</description>
+  </property>
+  <property>
+    <name>glusterfs_user</name>
+    <value>root</value>
+    <description></description>
+  </property>
+  <property>
+    <name>hdfs_log_dir_prefix</name>
+    <value>/var/log/hadoop</value>
+    <description>Hadoop Log Dir Prefix</description>
+  </property>
+  <property>
+    <name>namenode_heapsize</name>
+    <value>1024</value>
+    <description>NameNode Java heap size</description>
+  </property>
+  <property>
+    <name>namenode_host</name>
+    <value></value>
+    <description>NameNode Host.</description>
+  </property>
+  <property>
+    <name>snamenode_host</name>
+    <value></value>
+    <description>Secondary NameNode.</description>
+  </property>
+  <property>
+    <name>proxyuser_group</name>
+    <value>users</value>
+    <description>Proxy user group.</description>
+  </property>
+  <property>
+    <name>hdfs_user</name>
+    <value>hdfs</value>
+    <description>User to run HDFS as</description>
+  </property>
+  <!--
+  <property>
+    <name>user_group</name>
+    <value>hadoop</value>
+    <description>Proxy user group.</description>
+  </property>
+  -->
+    <!-- hadoop-env.sh -->
+  <property>
+    <name>content</name>
+    <description>This is the jinja template for hadoop-env.sh file</description>
+    <value>
+# Set Hadoop-specific environment variables here.
+
+# The only required environment variable is JAVA_HOME.  All others are
+# optional.  When running a distributed configuration it is best to
+# set JAVA_HOME in this file, so that it is correctly defined on
+# remote nodes.
+
+# The java implementation to use.  Required.
+export JAVA_HOME={{java_home}}
+export HADOOP_HOME_WARN_SUPPRESS=1
+
+# Hadoop home directory
+export HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}
+
+# Hadoop Configuration Directory
+#TODO: if env var set that can cause problems
+export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}
+
+{# this is different for HDP1 #}
+# Path to jsvc required by secure HDP 2.0 datanode
+export JSVC_HOME={{jsvc_path}}
+
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
+
+export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms{{namenode_heapsize}}"
+
+# Extra Java runtime options.  Empty by default.
+export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
+
+# Command specific options appended to HADOOP_OPTS when specified
+export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
+HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}"
+
+HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
+HADOOP_DATANODE_OPTS="-Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}"
+HADOOP_BALANCER_OPTS="-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}"
+
+export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
+
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
+# On secure datanodes, user to run the datanode as after dropping privileges
+export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}
+
+# Extra ssh options.  Empty by default.
+export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR"
+
+# Where log files are stored.  $HADOOP_HOME/logs by default.
+export HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER
+
+# History server logs
+export HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER
+
+# Where log files are stored in the secure data environment.
+export HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.
+# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
+
+# host:path where hadoop code should be rsync'd from.  Unset by default.
+# export HADOOP_MASTER=master:/home/$USER/src/hadoop
+
+# Seconds to sleep between slave commands.  Unset by default.  This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HADOOP_SLAVE_SLEEP=0.1
+
+# The directory where pid files are stored. /tmp by default.
+export HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER
+export HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+# History server pid
+export HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER
+
+YARN_RESOURCEMANAGER_OPTS="-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY"
+
+# A string representing this instance of hadoop. $USER by default.
+export HADOOP_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes.  See 'man nice'.
+
+# export HADOOP_NICENESS=10
+
+# Use libraries from standard classpath
+JAVA_JDBC_LIBS=""
+#Add libraries required by mysql connector
+for jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`
+do
+  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+done
+#Add libraries required by oracle connector
+for jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`
+do
+  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+done
+#Add libraries required by nodemanager
+MAPREDUCE_LIBS={{mapreduce_libs_path}}
+export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}
+
+if [ -d "/usr/lib/tez" ]; then
+  export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf
+fi
+
+# Setting path to hdfs command line
+export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
+
+#Mostly required for hadoop 2.0
+export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64
+    </value>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/c15cd0e0/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/GLUSTERFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/GLUSTERFS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/GLUSTERFS/metainfo.xml
new file mode 100644
index 0000000..2a6c16e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/GLUSTERFS/metainfo.xml
@@ -0,0 +1,74 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>GLUSTERFS</name>
+      <displayName>GLUSTERFS</displayName>
+      <comment>An Hadoop Compatible File System</comment>
+      <version>2.1.3.0</version>
+      <components>
+        <component>
+          <name>GLUSTERFS_CLIENT</name>
+          <category>CLIENT</category>
+          <commandScript>
+            <script>scripts/glusterfs_client.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+            <configFile>
+              <type>xml</type>
+              <fileName>core-site.xml</fileName>
+              <dictionaryName>core-site</dictionaryName>
+            </configFile>                       
+            <configFile>
+              <type>env</type>
+              <fileName>hadoop-env.sh</fileName>
+              <dictionaryName>hadoop-env</dictionaryName>
+            </configFile>          
+        </component>
+      </components>
+<!--
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any<osFamily>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>glusterfs</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+-->
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dependencies>
+        <config-type>core-site</config-type>
+        <config-type>hadoop-env</config-type>
+        <!--<config-type>hdfs-site</config-type>-->
+      </configuration-dependencies>
+
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/c15cd0e0/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/GLUSTERFS/package/scripts/glusterfs.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/GLUSTERFS/package/scripts/glusterfs.py b/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/GLUSTERFS/package/scripts/glusterfs.py
new file mode 100644
index 0000000..8b64c6a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/GLUSTERFS/package/scripts/glusterfs.py
@@ -0,0 +1,29 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from resource_management import *
+
+def glusterfs():
+  import params
+
+  Directory( params.glusterfs_conf_dir
+  )
+
+
+
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/c15cd0e0/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/GLUSTERFS/package/scripts/glusterfs_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/GLUSTERFS/package/scripts/glusterfs_client.py b/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/GLUSTERFS/package/scripts/glusterfs_client.py
new file mode 100644
index 0000000..840c76c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/GLUSTERFS/package/scripts/glusterfs_client.py
@@ -0,0 +1,34 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import sys
+from resource_management import *
+from glusterfs import glusterfs
+
+class GlusterFSClient(Script):
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    glusterfs()
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+if __name__ == "__main__":
+  GlusterFSClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/c15cd0e0/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/GLUSTERFS/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/GLUSTERFS/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/GLUSTERFS/package/scripts/params.py
new file mode 100644
index 0000000..6d88109
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/GLUSTERFS/package/scripts/params.py
@@ -0,0 +1,29 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+config = Script.get_config()
+
+
+#glusterfs_home = '/usr/lib/glusterfs'
+glusterfs_conf_dir = '/etc/glusterfs'
+log_dir = '/var/log/glusterfs'
+java64_home = config['hostLevelParams']['java_home']
+hadoop_home = "/usr"

http://git-wip-us.apache.org/repos/asf/ambari/blob/c15cd0e0/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/GLUSTERFS/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/GLUSTERFS/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/GLUSTERFS/package/scripts/service_check.py
new file mode 100644
index 0000000..6619a73
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/GLUSTERFS/package/scripts/service_check.py
@@ -0,0 +1,37 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+
+class GlusterFSServiceCheck(Script):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+
+    Execute(format("env  GLUSTERFS_LOG_DIR=/var/log/glusterfs "
+                   "GLUSTERFS_PID_DIR=/var/run/glusterfs "
+                   "glusterd --version"),
+            logoutput=True,
+            tries = 3,
+            try_sleep = 20
+    )
+
+if __name__ == "__main__":
+  GlusterFSServiceCheck().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/c15cd0e0/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/GLUSTERFS/package/templates/glusterfs-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/GLUSTERFS/package/templates/glusterfs-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/GLUSTERFS/package/templates/glusterfs-env.sh.j2
new file mode 100644
index 0000000..eda6de7
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/GLUSTERFS/package/templates/glusterfs-env.sh.j2
@@ -0,0 +1,38 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+JAVA_HOME={{java64_home}}
+HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/c15cd0e0/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/GLUSTERFS/package/templates/glusterfs.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/GLUSTERFS/package/templates/glusterfs.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/GLUSTERFS/package/templates/glusterfs.properties.j2
new file mode 100644
index 0000000..1bf6e1d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/GLUSTERFS/package/templates/glusterfs.properties.j2
@@ -0,0 +1,36 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#
+
+# GlusterFS configuration file. All values can be overwritten by command line arguments.
+
+
+
+# load jarfile, colon separated
+#jar=/usr/lib/hadoop/lib
+
+#verbose print all log messages to screen (default to print only INFO and above to screen)
+#verbose=true
+
+#exectype local|mapreduce, mapreduce is default
+#exectype=mapreduce
+
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/c15cd0e0/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/HBASE/configuration/hbase-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/HBASE/configuration/hbase-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/HBASE/configuration/hbase-site.xml
new file mode 100644
index 0000000..e5b893f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/HBASE/configuration/hbase-site.xml
@@ -0,0 +1,65 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>hbase.hstore.flush.retries.number</name>
+    <value>120</value>
+    <deleted>true</deleted>
+    <description>
+    The number of times the region flush operation will be retried.
+    </description>
+  </property>
+  
+  <property>
+    <name>hbase.hregion.majorcompaction</name>
+    <value>604800000</value>
+    <description>Time between major compactions, expressed in milliseconds. Set to 0 to disable
+      time-based automatic major compactions. User-requested and size-based major compactions will
+      still run. This value is multiplied by hbase.hregion.majorcompaction.jitter to cause
+      compaction to start at a somewhat-random time during a given window of time. The default value
+      is 7 days, expressed in milliseconds. If major compactions are causing disruption in your
+      environment, you can configure them to run at off-peak times for your deployment, or disable
+      time-based major compactions by setting this parameter to 0, and run major compactions in a
+      cron job or by another external mechanism.</description>
+  </property>
+  <property>
+    <name>hbase.hregion.majorcompaction.jitter</name>
+    <value>0.50</value>
+    <description>A multiplier applied to hbase.hregion.majorcompaction to cause compaction to occur
+      a given amount of time either side of hbase.hregion.majorcompaction. The smaller the number,
+      the closer the compactions will happen to the hbase.hregion.majorcompaction
+      interval.</description>
+  </property>
+  <property>
+    <name>hbase.hregion.memstore.block.multiplier</name>
+    <value>4</value>
+    <description>
+    Block updates if memstore has hbase.hregion.memstore.block.multiplier
+    times hbase.hregion.memstore.flush.size bytes.  Useful preventing
+    runaway memstore during spikes in update traffic.  Without an
+    upper-bound, memstore fills such that when it flushes the
+    resultant flush files take a long time to compact or split, or
+    worse, we OOME.
+    </description>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/c15cd0e0/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/HBASE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/HBASE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/HBASE/metainfo.xml
new file mode 100644
index 0000000..9b357d0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/HBASE/metainfo.xml
@@ -0,0 +1,47 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HBASE</name>
+      <displayName>HBase</displayName>
+      <version>0.98.4.2.2.0.0</version>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>redhat5,redhat6,suse11</osFamily>
+          <packages>
+            <package>
+              <name>hbase_2_2_*</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>ubuntu12</osFamily>
+          <packages>
+            <package>
+              <name>hbase-2-2-.*</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/c15cd0e0/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/HIVE/configuration/hive-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/HIVE/configuration/hive-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/HIVE/configuration/hive-env.xml
new file mode 100644
index 0000000..0eb4ef2
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/HIVE/configuration/hive-env.xml
@@ -0,0 +1,64 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <!-- hive-env.sh -->
+  <property>
+    <name>content</name>
+    <description>This is the jinja template for hive-env.sh file</description>
+    <value>
+ if [ "$SERVICE" = "cli" ]; then
+   if [ -z "$DEBUG" ]; then
+     export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit"
+   else
+     export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
+   fi
+ fi
+
+# The heap size of the jvm stared by hive shell script can be controlled via:
+
+export HADOOP_HEAPSIZE="{{hive_heapsize}}"
+export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
+
+# Larger heap size may be required when running queries over large number of files or partitions.
+# By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be
+# appropriate for hive server (hwi etc).
+
+
+# Set HADOOP_HOME to point to a specific hadoop install directory
+HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+
+# Hive Configuration Directory can be controlled by:
+export HIVE_CONF_DIR={{hive_config_dir}}
+
+# Folder containing extra libraries required for hive compilation/execution can be controlled by:
+if [ "${HIVE_AUX_JARS_PATH}" != "" ]; then
+  export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}
+elif [ -d "/usr/hdp/current/hive-webhcat/share/hcatalog" ]; then
+  export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog
+fi
+
+export METASTORE_PORT={{hive_metastore_port}}
+    </value>
+  </property>
+  
+</configuration>
