AMBARI-7878 BIGTOP stack definition should be updated (adenisso via jaoki)

Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/32b1fc38
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/32b1fc38
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/32b1fc38

Branch: refs/heads/trunk
Commit: 32b1fc38e374bfc2285c37f8dd83c04b35ab3c92
Parents: c9e24d5
Author: Jun Aoki <jaoki@apache.org>
Authored: Tue Nov 18 14:38:06 2014 -0800
Committer: Jun Aoki <jaoki@apache.org>
Committed: Tue Nov 18 14:38:06 2014 -0800

----------------------------------------------------------------------
 .../0.8/hooks/after-INSTALL/scripts/hook.py     |   2 +-
 .../0.8/hooks/after-INSTALL/scripts/params.py   |  25 +-
 .../scripts/shared_initialization.py            |  24 +-
 .../hooks/before-ANY/files/changeToSecureUid.sh |  50 ++++
 .../BIGTOP/0.8/hooks/before-ANY/scripts/hook.py |   2 +
 .../0.8/hooks/before-ANY/scripts/params.py      | 105 ++++++-
 .../before-ANY/scripts/shared_initialization.py |  56 ++++
 .../before-INSTALL/files/changeToSecureUid.sh   |  50 ----
 .../0.8/hooks/before-INSTALL/scripts/hook.py    |   1 -
 .../0.8/hooks/before-INSTALL/scripts/params.py  |   6 +-
 .../scripts/shared_initialization.py            |  41 +--
 .../hooks/before-START/files/checkForFormat.sh  |   5 +-
 .../0.8/hooks/before-START/scripts/hook.py      |   1 -
 .../0.8/hooks/before-START/scripts/params.py    |  24 +-
 .../scripts/shared_initialization.py            |  12 +-
 .../stacks/BIGTOP/0.8/repos/repoinfo.xml        |   2 +-
 .../stacks/BIGTOP/0.8/role_command_order.json   |   3 +-
 .../services/FLUME/configuration/flume-env.xml  |  38 +++
 .../BIGTOP/0.8/services/FLUME/metainfo.xml      |   2 +-
 .../0.8/services/FLUME/package/scripts/flume.py |   9 +-
 .../FLUME/package/scripts/flume_check.py        |   2 +-
 .../services/FLUME/package/scripts/params.py    |  12 +-
 .../GANGLIA/configuration/ganglia-env.xml       |   5 +
 .../GANGLIA/package/files/startRrdcached.sh     |   2 +-
 .../GANGLIA/package/scripts/ganglia_monitor.py  |   4 +-
 .../services/GANGLIA/package/scripts/params.py  |  31 +-
 .../services/HBASE/configuration/hbase-site.xml |  39 ---
 .../BIGTOP/0.8/services/HBASE/metainfo.xml      |   2 +-
 .../HBASE/package/files/hbaseSmokeVerify.sh     |   3 +-
 .../HBASE/package/scripts/hbase_decommission.py |  62 ++--
 .../HBASE/package/scripts/hbase_service.py      |   2 +-
 .../services/HBASE/package/scripts/params.py    |  33 ++-
 .../HBASE/package/scripts/service_check.py      |   6 +-
 .../services/HDFS/configuration/hadoop-env.xml  |  10 +-
 .../services/HDFS/configuration/hdfs-site.xml   |  94 +-----
 .../BIGTOP/0.8/services/HDFS/metainfo.xml       |   2 +-
 .../HDFS/package/files/checkForFormat.sh        |   4 +-
 .../0.8/services/HDFS/package/scripts/hdfs.py   |   9 +
 .../HDFS/package/scripts/hdfs_datanode.py       |  27 +-
 .../HDFS/package/scripts/hdfs_namenode.py       |  15 +-
 .../services/HDFS/package/scripts/namenode.py   |   2 +-
 .../0.8/services/HDFS/package/scripts/params.py |  44 ++-
 .../HDFS/package/scripts/service_check.py       |  35 ++-
 .../0.8/services/HDFS/package/scripts/utils.py  |  93 +++++-
 .../services/HIVE/configuration/hcat-env.xml    |  57 ++++
 .../services/HIVE/configuration/hive-env.xml    |   2 +-
 .../services/HIVE/configuration/hive-site.xml   | 284 ++++++++++++++++---
 .../services/HIVE/configuration/webhcat-env.xml |  54 ++++
 .../HIVE/configuration/webhcat-site.xml         | 138 +++++++++
 .../BIGTOP/0.8/services/HIVE/metainfo.xml       | 153 +++++-----
 .../HIVE/package/files/templetonSmoke.sh        |  96 +++++++
 .../0.8/services/HIVE/package/scripts/hcat.py   |  21 +-
 .../HIVE/package/scripts/hcat_service_check.py  |  10 +-
 .../0.8/services/HIVE/package/scripts/hive.py   |  35 +--
 .../HIVE/package/scripts/hive_service.py        |  34 +--
 .../HIVE/package/scripts/install_jars.py        |  37 ++-
 .../0.8/services/HIVE/package/scripts/params.py | 108 +++++--
 .../HIVE/package/scripts/postgresql_server.py   |   8 +-
 .../HIVE/package/scripts/postgresql_service.py  |   2 +
 .../HIVE/package/scripts/service_check.py       |  13 +-
 .../HIVE/package/scripts/status_params.py       |   1 +
 .../services/HIVE/package/scripts/webhcat.py    | 131 +++++++++
 .../HIVE/package/scripts/webhcat_server.py      |  53 ++++
 .../HIVE/package/scripts/webhcat_service.py     |  40 +++
 .../package/scripts/webhcat_service_check.py    |  41 +++
 .../HIVE/package/templates/hcat-env.sh.j2       |  43 ---
 .../package/templates/startHiveserver2.sh.j2    |   2 +-
 .../BIGTOP/0.8/services/MAHOUT/metainfo.xml     |  66 -----
 .../services/MAHOUT/package/scripts/mahout.py   |  66 -----
 .../MAHOUT/package/scripts/mahout_client.py     |  36 ---
 .../services/MAHOUT/package/scripts/params.py   |  55 ----
 .../MAHOUT/package/scripts/service_check.py     |  92 ------
 .../MAHOUT/package/templates/mahout-env.sh.j2   |  34 ---
 .../services/OOZIE/configuration/oozie-env.xml  |   4 +-
 .../OOZIE/configuration/oozie-log4j.xml         |   2 +-
 .../BIGTOP/0.8/services/OOZIE/metainfo.xml      |  14 +-
 .../services/OOZIE/package/files/oozieSmoke2.sh |  33 ++-
 .../0.8/services/OOZIE/package/scripts/oozie.py |  74 +++--
 .../OOZIE/package/scripts/oozie_client.py       |   2 +
 .../OOZIE/package/scripts/oozie_server.py       |   4 +-
 .../OOZIE/package/scripts/oozie_service.py      |  23 +-
 .../services/OOZIE/package/scripts/params.py    |  88 ++++--
 .../OOZIE/package/scripts/service_check.py      |   5 +-
 .../package/templates/catalina.properties.j2    |  81 ++++++
 .../package/templates/oozie-log4j.properties.j2 |   2 +-
 .../0.8/services/PIG/package/scripts/params.py  |  16 +-
 .../0.8/services/PIG/package/scripts/pig.py     |   1 +
 .../PIG/package/scripts/service_check.py        |  10 +-
 .../services/SQOOP/configuration/sqoop-env.xml  |  54 ----
 .../BIGTOP/0.8/services/SQOOP/metainfo.xml      |  92 ------
 .../services/SQOOP/package/scripts/__init__.py  |  19 --
 .../services/SQOOP/package/scripts/params.py    |  37 ---
 .../SQOOP/package/scripts/service_check.py      |  37 ---
 .../0.8/services/SQOOP/package/scripts/sqoop.py |  57 ----
 .../SQOOP/package/scripts/sqoop_client.py       |  41 ---
 .../WEBHCAT/configuration/webhcat-env.xml       |  54 ----
 .../WEBHCAT/configuration/webhcat-site.xml      | 138 ---------
 .../BIGTOP/0.8/services/WEBHCAT/metainfo.xml    | 107 -------
 .../WEBHCAT/package/files/templetonSmoke.sh     |  96 -------
 .../WEBHCAT/package/scripts/__init__.py         |  20 --
 .../services/WEBHCAT/package/scripts/params.py  |  83 ------
 .../WEBHCAT/package/scripts/service_check.py    |  45 ---
 .../WEBHCAT/package/scripts/status_params.py    |  26 --
 .../services/WEBHCAT/package/scripts/webhcat.py |  93 ------
 .../WEBHCAT/package/scripts/webhcat_server.py   |  53 ----
 .../WEBHCAT/package/scripts/webhcat_service.py  |  40 ---
 .../YARN/configuration-mapred/mapred-site.xml   |   6 -
 .../YARN/configuration/capacity-scheduler.xml   |  10 +-
 .../BIGTOP/0.8/services/YARN/metainfo.xml       |   2 +-
 .../scripts/application_timeline_server.py      |   4 +-
 .../package/scripts/mapred_service_check.py     |   4 +
 .../0.8/services/YARN/package/scripts/params.py |  55 ++--
 .../YARN/package/scripts/resourcemanager.py     |   5 +-
 .../services/YARN/package/scripts/service.py    |   2 +-
 .../YARN/package/scripts/service_check.py       |   3 +-
 .../YARN/package/scripts/status_params.py       |   5 +-
 .../0.8/services/YARN/package/scripts/yarn.py   |  14 +-
 .../ZOOKEEPER/configuration/zookeeper-env.xml   |   1 +
 .../BIGTOP/0.8/services/ZOOKEEPER/metainfo.xml  |   2 +-
 .../ZOOKEEPER/package/files/zkService.sh        |   2 +-
 .../services/ZOOKEEPER/package/files/zkSmoke.sh |  14 +-
 .../ZOOKEEPER/package/scripts/params.py         |  16 +-
 .../templates/zookeeper_client_jaas.conf.j2     |   1 -
 .../stacks/BIGTOP/0.8/services/stack_advisor.py |  80 +++---
 124 files changed, 2135 insertions(+), 2250 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/after-INSTALL/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/after-INSTALL/scripts/hook.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/after-INSTALL/scripts/hook.py
index cf83e19..71ac3df 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/after-INSTALL/scripts/hook.py
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/after-INSTALL/scripts/hook.py
@@ -28,7 +28,7 @@ class AfterInstallHook(Hook):
     import params
 
     env.set_params(params)
-    setup_hadoop_env()
+    setup_hdp_install_directory()
     setup_config()
 
 if __name__ == "__main__":

http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/after-INSTALL/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/after-INSTALL/scripts/params.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/after-INSTALL/scripts/params.py
index 1c611d4..f5851aa 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/after-INSTALL/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/after-INSTALL/scripts/params.py
@@ -19,24 +19,31 @@ limitations under the License.
 
 from resource_management import *
 from resource_management.core.system import System
-import os
 
 config = Script.get_config()
 
+#RPM versioning support
+rpm_version = default("/configurations/cluster-env/rpm_version", None)
+
+#hadoop params
+if rpm_version:
+  mapreduce_libs_path = "/usr/bigtop/current/hadoop-mapreduce-client/*"
+  hadoop_libexec_dir = "/usr/bigtop/current/hadoop-client/libexec"
+else:
+  mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
+  hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
+
+hadoop_conf_dir = "/etc/hadoop/conf"
+hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
+versioned_hdp_root = '/usr/bigtop/current'
 #security params
 security_enabled = config['configurations']['cluster-env']['security_enabled']
 #java params
 java_home = config['hostLevelParams']['java_home']
 #hadoop params
-hadoop_conf_dir = "/etc/hadoop/conf"
-hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
 hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
 hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
 hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger']
-hadoop_env_sh_template = config['configurations']['hadoop-env']['content']
-
-#hadoop-env.sh
-java_home = config['hostLevelParams']['java_home']
 
 if str(config['hostLevelParams']['stack_version']).startswith('2.0') and System.get_instance().os_family != "suse":
   # deprecated rhel jsvc_path
@@ -58,8 +65,6 @@ ttnode_heapsize = "1024m"
 
 dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
 mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
-mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
-hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
 mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
 
 #users and groups
@@ -67,4 +72,4 @@ hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
 user_group = config['configurations']['cluster-env']['user_group']
 
 namenode_host = default("/clusterHostInfo/namenode_host", [])
-has_namenode = not len(namenode_host) == 0
\ No newline at end of file
+has_namenode = not len(namenode_host) == 0
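
The default() calls above read optional keys from the command's configuration
JSON and fall back when the key is absent; everything behind rpm_version then
switches between the /usr/lib layout and the versioned /usr/bigtop/current
layout. A minimal standalone sketch of the same gating pattern (plain dict
lookups stand in for Ambari's config object; the values are hypothetical):

    # Sketch only: emulates default("/configurations/cluster-env/rpm_version", None)
    # over a plain dict. Ambari's real helper walks the command JSON similarly.
    def default(config, path, fallback=None):
        node = config
        for key in path.strip("/").split("/"):
            if not isinstance(node, dict) or key not in node:
                return fallback
            node = node[key]
        return node

    config = {"configurations": {"cluster-env": {}}}  # rpm_version not set
    rpm_version = default(config, "/configurations/cluster-env/rpm_version")
    if rpm_version:
        hadoop_libexec_dir = "/usr/bigtop/current/hadoop-client/libexec"
    else:
        hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
    print(hadoop_libexec_dir)  # -> /usr/lib/hadoop/libexec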

http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/after-INSTALL/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/after-INSTALL/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/after-INSTALL/scripts/shared_initialization.py
index 1ba0ae7..a930f54 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/after-INSTALL/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/after-INSTALL/scripts/shared_initialization.py
@@ -19,25 +19,11 @@ limitations under the License.
 import os
 from resource_management import *
 
-def setup_hadoop_env():
+def setup_hdp_install_directory():
   import params
-  if params.has_namenode:
-    if params.security_enabled:
-      tc_owner = "root"
-    else:
-      tc_owner = params.hdfs_user
-    Directory(params.hadoop_conf_empty_dir,
-              recursive=True,
-              owner='root',
-              group='root'
-    )
-    Link(params.hadoop_conf_dir,
-         to=params.hadoop_conf_empty_dir,
-         not_if=format("ls {hadoop_conf_dir}")
-    )
-    File(os.path.join(params.hadoop_conf_dir, 'hadoop-env.sh'),
-         owner=tc_owner,
-         content=InlineTemplate(params.hadoop_env_sh_template)
+  if params.rpm_version:
+    Execute(format('ambari-python-wrap /usr/bin/bigtop-select set all `ambari-python-wrap /usr/bin/bigtop-select versions | grep ^{rpm_version}- | tail -1`'),
+            only_if=format('ls -d /usr/bigtop/{rpm_version}-*')
     )
 
 def setup_config():
@@ -49,4 +35,4 @@ def setup_config():
               configuration_attributes=params.config['configuration_attributes']['core-site'],
               owner=params.hdfs_user,
               group=params.user_group
-  )
\ No newline at end of file
+    )
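
With format() substitution applied (for a hypothetical rpm_version of
"0.8.0"), the Execute above runs a backtick pipeline that pins the 'current'
symlinks to the newest matching build; a sketch that only composes and prints
the resulting shell command:

    # Sketch: the shell command the Execute resource would issue; bigtop-select
    # selects the newest installed {rpm_version}-* build on the host.
    rpm_version = "0.8.0"  # hypothetical cluster-env/rpm_version value
    cmd = ("ambari-python-wrap /usr/bin/bigtop-select set all "
           "`ambari-python-wrap /usr/bin/bigtop-select versions "
           "| grep ^%s- | tail -1`" % rpm_version)
    print(cmd)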

http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-ANY/files/changeToSecureUid.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-ANY/files/changeToSecureUid.sh b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-ANY/files/changeToSecureUid.sh
new file mode 100644
index 0000000..154c1c0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-ANY/files/changeToSecureUid.sh
@@ -0,0 +1,50 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+username=$1
+directories=$2
+
+function find_available_uid() {
+ for ((i=1001; i<=2000; i++))
+ do
+   grep -q $i /etc/passwd
+   if [ "$?" -ne 0 ]
+   then
+    newUid=$i
+    break
+   fi
+ done
+}
+
+find_available_uid
+
+if [ -z "$newUid" ]
+then
+  echo "Failed to find an available uid between 1001 and 2000"
+  exit 1
+fi
+
+dir_array=($(echo $directories | sed 's/,/\n/g'))
+old_uid=$(id -u $username)
+echo "Changing uid of $username from $old_uid to $newUid"
+echo "Changing directory permisions for ${dir_array[@]}"
+usermod -u $newUid $username && for dir in ${dir_array[@]} ; do chown -Rh 
$newUid $dir ; done
+exit 0
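
The script expects a username and a comma-separated directory list; a
hypothetical invocation from Python (the staging path and user name are
assumptions, mirroring how set_uid stages the script later in this commit):

    import subprocess

    # Hypothetical: run the staged script for the smoke user over its dirs;
    # the script picks a free uid in 1001-2000 and chowns the listed paths.
    subprocess.check_call([
        "bash", "/var/lib/ambari-agent/tmp/changeUid.sh",
        "ambari-qa",
        "/tmp/hadoop-ambari-qa,/home/ambari-qa",
    ])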

http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-ANY/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-ANY/scripts/hook.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-ANY/scripts/hook.py
index 0f97e28..1fd36d6 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-ANY/scripts/hook.py
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-ANY/scripts/hook.py
@@ -27,6 +27,8 @@ class BeforeAnyHook(Hook):
     env.set_params(params)
     
     setup_jce()
+    setup_users()
+    setup_hadoop_env()
 
 if __name__ == "__main__":
   BeforeAnyHook().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-ANY/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-ANY/scripts/params.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-ANY/scripts/params.py
index af67cb6..7a93d41 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-ANY/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-ANY/scripts/params.py
@@ -18,12 +18,12 @@ limitations under the License.
 """
 
 from resource_management import *
+import collections
+import json
 
 config = Script.get_config()
 tmp_dir = Script.get_tmp_dir()
 
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-
 artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
 jce_policy_zip = default("/hostLevelParams/jce_name", None) # None when jdk is already installed by user
 jce_location = config['hostLevelParams']['jdk_location']
@@ -31,3 +31,104 @@ jdk_name = default("/hostLevelParams/jdk_name", None)
 java_home = config['hostLevelParams']['java_home']
 
 ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
+
+#RPM versioning support
+rpm_version = default("/configurations/cluster-env/rpm_version", None)
+
+#hadoop params
+if rpm_version:
+  mapreduce_libs_path = "/usr/bigtop/current/hadoop-mapreduce-client/*"
+  hadoop_libexec_dir = "/usr/bigtop/current/hadoop-client/libexec"
+  hadoop_home = "/usr/bigtop/current/hadoop-client"
+else:
+  mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
+  hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
+  hadoop_home = "/usr/lib/hadoop"
+
+hadoop_conf_dir = "/etc/hadoop/conf"
+hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
+versioned_hdp_root = '/usr/bigtop/current'
+#security params
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+
+#hadoop params
+hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
+hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
+hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger']
+
+if str(config['hostLevelParams']['stack_version']).startswith('2.0') and System.get_instance().os_family != "suse":
+  # deprecated rhel jsvc_path
+  jsvc_path = "/usr/libexec/bigtop-utils"
+else:
+  jsvc_path = "/usr/lib/bigtop-utils"
+
+hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
+namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
+namenode_opt_newsize =  config['configurations']['hadoop-env']['namenode_opt_newsize']
+namenode_opt_maxnewsize =  config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
+
+jtnode_opt_newsize = "200m"
+jtnode_opt_maxnewsize = "200m"
+jtnode_heapsize =  "1024m"
+ttnode_heapsize = "1024m"
+
+dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
+mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
+mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
+hadoop_env_sh_template = config['configurations']['hadoop-env']['content']
+
+#users and groups
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+hbase_user = config['configurations']['hbase-env']['hbase_user']
+nagios_user = config['configurations']['nagios-env']['nagios_user']
+smoke_user =  config['configurations']['cluster-env']['smokeuser']
+gmetad_user = config['configurations']['ganglia-env']["gmetad_user"]
+gmond_user = config['configurations']['ganglia-env']["gmond_user"]
+tez_user = config['configurations']['tez-env']["tez_user"]
+oozie_user = config['configurations']['oozie-env']["oozie_user"]
+
+user_group = config['configurations']['cluster-env']['user_group']
+
+hagios_server_hosts = default("/clusterHostInfo/nagios_server_host", [])
+ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
+namenode_host = default("/clusterHostInfo/namenode_host", [])
+hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
+oozie_servers = default("/clusterHostInfo/oozie_server", [])
+
+has_namenode = not len(namenode_host) == 0
+has_nagios = not len(hagios_server_hosts) == 0
+has_ganglia_server = not len(ganglia_server_hosts) == 0
+has_tez = 'tez-site' in config['configurations']
+has_hbase_masters = not len(hbase_master_hosts) == 0
+has_oozie_server = not len(oozie_servers) == 0
+
+hbase_tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
+
+proxyuser_group = default("/configurations/hadoop-env/proxyuser_group","users")
+nagios_group = config['configurations']['nagios-env']['nagios_group']
+
+ignore_groupsusers_create = default("/configurations/cluster-env/ignore_groupsusers_create", False)
+
+smoke_user_dirs = format("/tmp/hadoop-{smoke_user},/tmp/hsperfdata_{smoke_user},/home/{smoke_user},/tmp/{smoke_user},/tmp/sqoop-{smoke_user}")
+if has_hbase_masters:
+  hbase_user_dirs = format("/home/{hbase_user},/tmp/{hbase_user},/usr/bin/{hbase_user},/var/log/{hbase_user},{hbase_tmp_dir}")
+#repo params
+repo_info = config['hostLevelParams']['repo_info']
+service_repo_info = default("/hostLevelParams/service_repo_info",None)
+
+user_to_groups_dict = collections.defaultdict(lambda:[user_group])
+user_to_groups_dict[smoke_user] = [proxyuser_group]
+if has_ganglia_server:
+  user_to_groups_dict[gmond_user] = [gmond_user]
+  user_to_groups_dict[gmetad_user] = [gmetad_user]
+if has_tez:
+  user_to_groups_dict[tez_user] = [proxyuser_group]
+if has_oozie_server:
+  user_to_groups_dict[oozie_user] = [proxyuser_group]
+
+user_to_gid_dict = collections.defaultdict(lambda:user_group)
+if has_nagios:
+  user_to_gid_dict[nagios_user] = nagios_group
+
+user_list = json.loads(config['hostLevelParams']['user_list'])
+group_list = json.loads(config['hostLevelParams']['group_list'])
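
The two defaultdicts above give every user the cluster user_group (and gid)
unless a service-specific override is registered; a small self-contained
illustration of the pattern (user and group names hypothetical):

    import collections

    # Every user falls back to the cluster group; selected users are overridden.
    user_group = "hadoop"
    proxyuser_group = "users"
    user_to_groups_dict = collections.defaultdict(lambda: [user_group])
    user_to_groups_dict["ambari-qa"] = [proxyuser_group]  # smoke user override
    print(user_to_groups_dict["hdfs"])       # -> ['hadoop'] (default)
    print(user_to_groups_dict["ambari-qa"])  # -> ['users']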

http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-ANY/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-ANY/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-ANY/scripts/shared_initialization.py
index 74bdce0..126b8bb 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-ANY/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-ANY/scripts/shared_initialization.py
@@ -56,3 +56,59 @@ def setup_jce():
             cwd  = security_dir,
             path = ['/bin/','/usr/bin']
     )
+
+def setup_users():
+  """
+  Creates users before cluster installation
+  """
+  import params
+  
+  for group in params.group_list:
+    Group(group,
+        ignore_failures = params.ignore_groupsusers_create
+    )
+    
+  for user in params.user_list:
+    User(user,
+        gid = params.user_to_gid_dict[user],
+        groups = params.user_to_groups_dict[user],
+        ignore_failures = params.ignore_groupsusers_create       
+    )
+           
+  set_uid(params.smoke_user, params.smoke_user_dirs)
+
+  if params.has_hbase_masters:
+    set_uid(params.hbase_user, params.hbase_user_dirs)
+    
+def set_uid(user, user_dirs):
+  """
+  user_dirs - comma separated directories
+  """
+  import params
+
+  File(format("{tmp_dir}/changeUid.sh"),
+       content=StaticFile("changeToSecureUid.sh"),
+       mode=0555)
+  Execute(format("{tmp_dir}/changeUid.sh {user} {user_dirs} 2>/dev/null"),
+          not_if = format("test $(id -u {user}) -gt 1000"))
+    
+def setup_hadoop_env():
+  import params
+  if params.has_namenode:
+    if params.security_enabled:
+      tc_owner = "root"
+    else:
+      tc_owner = params.hdfs_user
+    Directory(params.hadoop_conf_empty_dir,
+              recursive=True,
+              owner='root',
+              group='root'
+    )
+    Link(params.hadoop_conf_dir,
+         to=params.hadoop_conf_empty_dir,
+         not_if=format("ls {hadoop_conf_dir}")
+    )
+    File(os.path.join(params.hadoop_conf_dir, 'hadoop-env.sh'),
+         owner=tc_owner,
+         content=InlineTemplate(params.hadoop_env_sh_template)
+    )
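
The not_if guard in set_uid above skips the uid change once the user already
has a uid above 1000; a plain-Python sketch of that check (user name
hypothetical):

    import subprocess

    # Mirrors the not_if command: exit status 0 means "already secure, skip".
    def already_secure(user):
        return subprocess.call("test $(id -u %s) -gt 1000" % user,
                               shell=True) == 0

    if not already_secure("ambari-qa"):
        print("would run changeUid.sh here")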

http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-INSTALL/files/changeToSecureUid.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-INSTALL/files/changeToSecureUid.sh b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-INSTALL/files/changeToSecureUid.sh
deleted file mode 100644
index 154c1c0..0000000
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-INSTALL/files/changeToSecureUid.sh
+++ /dev/null
@@ -1,50 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-username=$1
-directories=$2
-
-function find_available_uid() {
- for ((i=1001; i<=2000; i++))
- do
-   grep -q $i /etc/passwd
-   if [ "$?" -ne 0 ]
-   then
-    newUid=$i
-    break
-   fi
- done
-}
-
-find_available_uid
-
-if [ $newUid -eq 0 ]
-then
-  echo "Failed to find Uid between 1000 and 2000"
-  exit 1
-fi
-
-dir_array=($(echo $directories | sed 's/,/\n/g'))
-old_uid=$(id -u $username)
-echo "Changing uid of $username from $old_uid to $newUid"
-echo "Changing directory permisions for ${dir_array[@]}"
-usermod -u $newUid $username && for dir in ${dir_array[@]} ; do chown -Rh $newUid $dir ; done
-exit 0

http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-INSTALL/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-INSTALL/scripts/hook.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-INSTALL/scripts/hook.py
index a8a5662..61fba18 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-INSTALL/scripts/hook.py
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-INSTALL/scripts/hook.py
@@ -33,7 +33,6 @@ class BeforeInstallHook(Hook):
     install_repos()
     install_packages()
     setup_java()
-    setup_users()
 
 if __name__ == "__main__":
   BeforeInstallHook().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-INSTALL/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-INSTALL/scripts/params.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-INSTALL/scripts/params.py
index 38d3137..a687ea7 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-INSTALL/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-INSTALL/scripts/params.py
@@ -19,13 +19,15 @@ limitations under the License.
 
 from resource_management import *
 from resource_management.core.system import System
-import os
 import json
 import collections
 
 config = Script.get_config()
 tmp_dir = Script.get_tmp_dir()
 
+#RPM versioning support
+rpm_version = default("/configurations/cluster-env/rpm_version", None)
+
 #users and groups
 hbase_user = config['configurations']['hbase-env']['hbase_user']
 smoke_user =  config['configurations']['cluster-env']['smokeuser']
@@ -36,6 +38,8 @@ tez_user = config['configurations']['tez-env']["tez_user"]
 user_group = config['configurations']['cluster-env']['user_group']
 proxyuser_group = default("/configurations/hadoop-env/proxyuser_group","users")
 
+hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
+
 #hosts
 hostname = config["hostname"]
 ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]

http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-INSTALL/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-INSTALL/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-INSTALL/scripts/shared_initialization.py
index 720777f..03afc44 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-INSTALL/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-INSTALL/scripts/shared_initialization.py
@@ -21,41 +21,6 @@ import os
 
 from resource_management import *
 
-def setup_users():
-  """
-  Creates users before cluster installation
-  """
-  import params
-  
-  for group in params.group_list:
-    Group(group,
-        ignore_failures = params.ignore_groupsusers_create
-    )
-    
-  for user in params.user_list:
-    User(user,
-        gid = params.user_to_gid_dict[user],
-        groups = params.user_to_groups_dict[user],
-        ignore_failures = params.ignore_groupsusers_create       
-    )
-           
-  set_uid(params.smoke_user, params.smoke_user_dirs)
-
-  if params.has_hbase_masters:
-    set_uid(params.hbase_user, params.hbase_user_dirs)
-
-def set_uid(user, user_dirs):
-  """
-  user_dirs - comma separated directories
-  """
-  import params
-
-  File(format("{tmp_dir}/changeUid.sh"),
-       content=StaticFile("changeToSecureUid.sh"),
-       mode=0555)
-  Execute(format("{tmp_dir}/changeUid.sh {user} {user_dirs} 2>/dev/null"),
-          not_if = format("test $(id -u {user}) -gt 1000"))
-  
 def setup_java():
   """
   Installs jdk using specific params, that comes from ambari-server
@@ -91,4 +56,8 @@ def setup_java():
   )
 
 def install_packages():
-  Package(['unzip', 'curl'])
+  import params
+  packages = ['unzip', 'curl']
+  if params.rpm_version:
+    packages.append('bigtop-select')
+  Package(packages)

http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-START/files/checkForFormat.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-START/files/checkForFormat.sh b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-START/files/checkForFormat.sh
index f92f613..82dbda1 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-START/files/checkForFormat.sh
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-START/files/checkForFormat.sh
@@ -24,6 +24,8 @@ export hdfs_user=$1
 shift
 export conf_dir=$1
 shift
+export bin_dir=$1
+shift
 export mark_dir=$1
 shift
 export name_dirs=$*
@@ -50,7 +52,8 @@ if [[ ! -d $mark_dir ]] ; then
   done
 
   if [[ $EXIT_CODE == 0 ]] ; then
-    su - ${hdfs_user} -c "yes Y | hadoop --config ${conf_dir} ${command}"
+    export PATH=$PATH:$bin_dir
+    su -s /bin/bash - ${hdfs_user} -c "yes Y | hadoop --config ${conf_dir} ${command}"
   else
     echo "ERROR: Namenode directory(s) is non empty. Will not format the 
namenode. List of non-empty namenode dirs ${list_of_non_empty_dirs}"
   fi
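
Since bin_dir is now the third positional argument, callers must shift
mark_dir and the name_dirs list right by one; a hypothetical caller sketch
(the paths are illustrative, not taken from the commit):

    import subprocess

    subprocess.check_call([
        "bash", "checkForFormat.sh",
        "hdfs",                                     # hdfs_user
        "/etc/hadoop/conf",                         # conf_dir
        "/usr/lib/hadoop/bin",                      # bin_dir (new, added to PATH)
        "/var/run/hadoop/hdfs/namenode/formatted",  # mark_dir (hypothetical)
        "/hadoop/hdfs/namenode",                    # name_dirs...
    ])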

http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-START/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-START/scripts/hook.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-START/scripts/hook.py
index a18a776..c90a55c 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-START/scripts/hook.py
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-START/scripts/hook.py
@@ -27,7 +27,6 @@ class BeforeStartHook(Hook):
     import params
 
     self.run_custom_hook('before-ANY')
-    self.run_custom_hook('after-INSTALL')
     env.set_params(params)
 
     setup_hadoop()

http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-START/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-START/scripts/params.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-START/scripts/params.py
index ebcaaec..3dca7f0 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-START/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-START/scripts/params.py
@@ -23,6 +23,24 @@ import os
 
 config = Script.get_config()
 
+#RPM versioning support
+rpm_version = default("/configurations/cluster-env/rpm_version", None)
+
+#hadoop params
+if rpm_version:
+  mapreduce_libs_path = "/usr/bigtop/current/hadoop-mapreduce-client/*"
+  hadoop_libexec_dir = "/usr/bigtop/current/hadoop-client/libexec"
+  hadoop_lib_home = "/usr/bigtop/current/hadoop-client/lib"
+  hadoop_bin = "/usr/bigtop/current/hadoop-client/sbin"
+  hadoop_home = '/usr/bigtop/current/hadoop-client'
+else:
+  mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
+  hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
+  hadoop_lib_home = "/usr/lib/hadoop/lib"
+  hadoop_bin = "/usr/lib/hadoop/sbin"
+  hadoop_home = '/usr'
+
+hadoop_conf_dir = "/etc/hadoop/conf"
 #security params
 security_enabled = config['configurations']['cluster-env']['security_enabled']
 
@@ -70,11 +88,7 @@ if has_ganglia_server:
 
 if has_namenode:
   hadoop_tmp_dir = format("/tmp/hadoop-{hdfs_user}")
-hadoop_lib_home = "/usr/lib/hadoop/lib"
-hadoop_conf_dir = "/etc/hadoop/conf"
 hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
-hadoop_home = "/usr"
-hadoop_bin = "/usr/lib/hadoop/sbin"
 
 task_log4j_properties_location = os.path.join(hadoop_conf_dir, "task-log4j.properties")
 
@@ -127,8 +141,6 @@ ttnode_heapsize = "1024m"
 
 dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
 mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
-mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
-hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
 mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
 
 #log4j.properties

http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-START/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-START/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-START/scripts/shared_initialization.py
index 0d00aca..f70eee8 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-START/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-START/scripts/shared_initialization.py
@@ -38,7 +38,8 @@ def setup_hadoop():
     Directory(params.hdfs_log_dir_prefix,
               recursive=True,
               owner='root',
-              group='root'
+              group=params.user_group,
+              mode=0775
     )
     Directory(params.hadoop_pid_dir_prefix,
               recursive=True,
@@ -162,10 +163,11 @@ def install_snappy():
   so_src_dir_x64 = format("{hadoop_home}/lib64")
   so_src_x86 = format("{so_src_dir_x86}/{snappy_so}")
   so_src_x64 = format("{so_src_dir_x64}/{snappy_so}")
-  Execute(
-    format("mkdir -p {so_target_dir_x86}; ln -sf {so_src_x86} {so_target_x86}"))
-  Execute(
-    format("mkdir -p {so_target_dir_x64}; ln -sf {so_src_x64} {so_target_x64}"))
+  if params.has_namenode:
+    Execute(
+      format("mkdir -p {so_target_dir_x86}; ln -sf {so_src_x86} {so_target_x86}"))
+    Execute(
+      format("mkdir -p {so_target_dir_x64}; ln -sf {so_src_x64} {so_target_x64}"))
 
 
 def create_javahome_symlink():

http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/repos/repoinfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/repos/repoinfo.xml b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/repos/repoinfo.xml
index c372719..6a385c4 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/repos/repoinfo.xml
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/repos/repoinfo.xml
@@ -21,7 +21,7 @@
   -->
   <os family="redhat6">
     <repo>
-      <baseurl>http://bigtop01.cloudera.org:8080/job/Bigtop-trunk-Repository/label=centos6/lastSuccessfulBuild/artifact/repo/</baseurl>
+      <baseurl>http://bigtop.s3.amazonaws.com/releases/0.8.0/redhat/6/x86_64</baseurl>
       <repoid>BIGTOP-0.8</repoid>
       <reponame>BIGTOP</reponame>
     </repo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/role_command_order.json b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/role_command_order.json
index 25611d1..f8e21a4 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/role_command_order.json
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/role_command_order.json
@@ -13,9 +13,8 @@
     "FLUME_HANDLER-START": ["OOZIE_SERVER-START"],
     "MAPREDUCE_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", 
"RESOURCEMANAGER-START"],
     "OOZIE_SERVICE_CHECK-SERVICE_CHECK": ["OOZIE_SERVER-START", 
"MAPREDUCE2_SERVICE_CHECK-SERVICE_CHECK"],
-    "WEBHCAT_SERVICE_CHECK-SERVICE_CHECK": ["WEBHCAT_SERVER-START"],
     "HBASE_SERVICE_CHECK-SERVICE_CHECK": ["HBASE_MASTER-START", 
"HBASE_REGIONSERVER-START"],
-    "HIVE_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START", 
"HIVE_METASTORE-START"],
+    "HIVE_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START", 
"HIVE_METASTORE-START", "WEBHCAT_SERVER-START"],
     "HCAT_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START"],
     "PIG_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", 
"RESOURCEMANAGER-START"],
     "MAHOUT_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", 
"RESOURCEMANAGER-START"],

http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/FLUME/configuration/flume-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/FLUME/configuration/flume-env.xml b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/FLUME/configuration/flume-env.xml
index 7b11bde..902b3ca 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/FLUME/configuration/flume-env.xml
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/FLUME/configuration/flume-env.xml
@@ -37,4 +37,42 @@
     <property-type>USER</property-type>
     <description>Flume User</description>
   </property>
+
+  <!-- flume-env.sh -->
+  <property>
+    <name>content</name>
+    <description>This is the jinja template for flume-env.sh file</description>
+    <value>
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# If this file is placed at FLUME_CONF_DIR/flume-env.sh, it will be sourced
+# during Flume startup.
+
+# Environment variables can be set here.
+
+export JAVA_HOME={{java_home}}
+
+# Give Flume more memory and pre-allocate, enable remote monitoring via JMX
+# export JAVA_OPTS="-Xms100m -Xmx2000m -Dcom.sun.management.jmxremote"
+
+# Note that the Flume conf directory is always included in the classpath.
+#FLUME_CLASSPATH=""
+
+# export HIVE_HOME=/usr/lib/hive
+# export HCAT_HOME=/usr/lib/hive-hcatalog
+    </value>
+  </property>
 </configuration>
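
The content property above is rendered as a Jinja template, so the
{{java_home}} placeholder is filled from params at deploy time (Ambari's
InlineTemplate uses a Jinja engine as well); a standalone sketch of the
substitution using jinja2 directly, with a hypothetical java_home:

    from jinja2 import Template

    # Renders the relevant line of the flume-env.sh template above.
    tmpl = Template("export JAVA_HOME={{java_home}}\n")
    print(tmpl.render(java_home="/usr/jdk64/jdk1.7.0_67"))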

http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/FLUME/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/FLUME/metainfo.xml b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/FLUME/metainfo.xml
index 3c39550..f15ad9a 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/FLUME/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/FLUME/metainfo.xml
@@ -22,7 +22,7 @@
       <name>FLUME</name>
       <displayName>Flume</displayName>
       <comment>A distributed service for collecting, aggregating, and moving large amounts of streaming data into HDFS</comment>
-      <version>1.4.0.2.0</version>
+      <version>1.5.0.1.671</version>
       <components>
         <component>
           <name>FLUME_HANDLER</name>

http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/FLUME/package/scripts/flume.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/FLUME/package/scripts/flume.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/FLUME/package/scripts/flume.py
index 6109d3e..2db4039 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/FLUME/package/scripts/flume.py
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/FLUME/package/scripts/flume.py
@@ -30,9 +30,14 @@ def flume(action = None):
     for n in find_expected_agent_names():
       os.unlink(os.path.join(params.flume_conf_dir, n, 'ambari-meta.json'))
 
-    Directory(params.flume_conf_dir)
+    Directory(params.flume_conf_dir, recursive=True)
     Directory(params.flume_log_dir, owner=params.flume_user)
 
+    File(format("{flume_conf_dir}/flume-env.sh"),
+         owner=params.flume_user,
+         content=InlineTemplate(params.flume_env_sh_template)
+    )
+
     flume_agents = {}
     if params.flume_conf_content is not None:
       flume_agents = build_flume_topology(params.flume_conf_content)
@@ -63,7 +68,7 @@ def flume(action = None):
       _set_desired_state('STARTED')
       
     flume_base = format('su -s /bin/bash {flume_user} -c "export JAVA_HOME={java_home}; '
-      '/usr/bin/flume-ng agent --name {{0}} --conf {{1}} --conf-file {{2}} {{3}}"')
+      '{flume_bin} agent --name {{0}} --conf {{1}} --conf-file {{2}} {{3}}"')
 
     for agent in cmd_target_names():
       flume_agent_conf_dir = params.flume_conf_dir + os.sep + agent
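
The doubled braces in flume_base survive the resource_management format()
call as literal {0}..{3} placeholders, which are then filled per agent; a
plain str.format sketch of the two-stage expansion (values hypothetical;
Ambari's format() resolves the named fields from the caller's scope instead):

    # Stage 1: environment-specific fields; {{n}} escapes to literal {n}.
    stage1 = ('su -s /bin/bash {flume_user} -c "export JAVA_HOME={java_home}; '
              '{flume_bin} agent --name {{0}} --conf {{1}} --conf-file {{2}} {{3}}"'
              ).format(flume_user="flume", java_home="/usr/java/default",
                       flume_bin="/usr/bin/flume-ng")
    # Stage 2: per-agent positional values fill {0}..{3}.
    print(stage1.format("a1", "/etc/flume/conf/a1",
                        "/etc/flume/conf/a1/flume.conf", ""))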

http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/FLUME/package/scripts/flume_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/FLUME/package/scripts/flume_check.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/FLUME/package/scripts/flume_check.py
index 3036e20..b93b8e8 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/FLUME/package/scripts/flume_check.py
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/FLUME/package/scripts/flume_check.py
@@ -31,7 +31,7 @@ class FlumeServiceCheck(Script):
       Execute(format("{kinit_path_local} -kt {http_keytab} 
{principal_replaced}"),
               user=params.smoke_user)
 
-    Execute(format('env JAVA_HOME={java_home} /usr/bin/flume-ng version'),
+    Execute(format('env JAVA_HOME={java_home} {flume_bin} version'),
             logoutput=True,
             tries = 3,
             try_sleep = 20)

http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/FLUME/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/FLUME/package/scripts/params.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/FLUME/package/scripts/params.py
index 128eed4..ef53fa9 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/FLUME/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/FLUME/package/scripts/params.py
@@ -26,9 +26,17 @@ proxyuser_group =  config['configurations']['hadoop-env']['proxyuser_group']
 
 security_enabled = False
 
-java_home = config['hostLevelParams']['java_home']
+#RPM versioning support
+rpm_version = default("/configurations/cluster-env/rpm_version", None)
+
+#hadoop params
+if rpm_version:
+  flume_bin = '/usr/bigtop/current/flume-client/bin/flume-ng'
+else:
+  flume_bin = '/usr/bin/flume-ng'
 
 flume_conf_dir = '/etc/flume/conf'
+java_home = config['hostLevelParams']['java_home']
 flume_log_dir = '/var/log/flume'
 flume_run_dir = '/var/run/flume'
 flume_user = 'flume'
@@ -50,6 +58,8 @@ else:
 targets = default('/commandParams/flume_handler', None)
 flume_command_targets = [] if targets is None else targets.split(',')
 
+flume_env_sh_template = config['configurations']['flume-env']['content']
+
 ganglia_server_hosts = default('/clusterHostInfo/ganglia_server_host', [])
 ganglia_server_host = None
 if 0 != len(ganglia_server_hosts):

http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/GANGLIA/configuration/ganglia-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/GANGLIA/configuration/ganglia-env.xml b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/GANGLIA/configuration/ganglia-env.xml
index e42baa5..3328acf 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/GANGLIA/configuration/ganglia-env.xml
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/GANGLIA/configuration/ganglia-env.xml
@@ -68,5 +68,10 @@
     <value>4</value>
     <description>(-t) Specifies the number of threads used for writing RRD files. The default is 4. Increasing this number will allow rrdcached to have more simultaneous I/O requests into the kernel. This may allow the kernel to re-order disk writes, resulting in better disk throughput.</description>
   </property>
+  <property>
+    <name>additional_clusters</name>
+    <value> </value>
+    <description>Add additional desired Ganglia metrics clusters in the form "name1:port1,name2:port2". Ensure that the names and ports are unique across all clusters and that the ports are available on the Ganglia server host. Ambari has reserved ports 8667-8669 within its own pool.</description>
+  </property>
 
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/GANGLIA/package/files/startRrdcached.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/GANGLIA/package/files/startRrdcached.sh b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/GANGLIA/package/files/startRrdcached.sh
index 262f716..dc47f39 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/GANGLIA/package/files/startRrdcached.sh
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/GANGLIA/package/files/startRrdcached.sh
@@ -31,7 +31,7 @@ rrdcachedRunningPid=`getRrdcachedRunningPid`;
 # Only attempt to start rrdcached if there's not already one running.
 if [ -z "${rrdcachedRunningPid}" ]
 then
-    su - ${GMETAD_USER} -s /bin/bash -c "${RRDCACHED_BIN} -p ${RRDCACHED_PID_FILE} \
+    su -s /bin/bash - ${GMETAD_USER} -c "${RRDCACHED_BIN} -p ${RRDCACHED_PID_FILE} \
              -m 664 -l unix:${RRDCACHED_ALL_ACCESS_UNIX_SOCKET} \
              -m 777 -P FLUSH,STATS,HELP -l unix:${RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET} \
              -b ${RRDCACHED_BASE_DIR} -B -t ${RRDCACHED_WRITE_THREADS} \

http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/GANGLIA/package/scripts/ganglia_monitor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/GANGLIA/package/scripts/ganglia_monitor.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/GANGLIA/package/scripts/ganglia_monitor.py
index 96cfdda..ede1a0b 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/GANGLIA/package/scripts/ganglia_monitor.py
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/GANGLIA/package/scripts/ganglia_monitor.py
@@ -110,12 +110,12 @@ class GangliaMonitor(Script):
 
     for gmond_app in params.gmond_apps:
       generate_daemon("gmond",
-                      name=gmond_app,
+                      name=gmond_app[0],
                       role="server",
                       owner="root",
                       group=params.user_group)
       generate_daemon("gmond",
-                      name = gmond_app,
+                      name = gmond_app[0],
                       role = "monitor",
                       owner = "root",
                       group = params.user_group)

http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/GANGLIA/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/GANGLIA/package/scripts/params.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/GANGLIA/package/scripts/params.py
index e155122..f8373ac 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/GANGLIA/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/GANGLIA/package/scripts/params.py
@@ -31,11 +31,16 @@ ganglia_shell_cmds_dir = "/usr/libexec/hdp/ganglia"
 gmetad_user = config['configurations']['ganglia-env']["gmetad_user"]
 gmond_user = config['configurations']['ganglia-env']["gmond_user"]
 
-gmond_app_str = default("/configurations/hadoop-env/enabled_app_servers", None)
-gmond_apps = [] if gmond_app_str is None else gmond_app_str.split(',')
-gmond_apps = [x.strip() for x in gmond_apps]
-gmond_allowed_apps = ["Application1", "Application2", "Application3"]
-gmond_apps = set(gmond_apps) & set(gmond_allowed_apps)
+gmond_add_clusters_str = default("/configurations/ganglia-env/additional_clusters", None)
+if gmond_add_clusters_str and gmond_add_clusters_str.isspace():
+  gmond_add_clusters_str = None
+
+gmond_app_strs = [] if gmond_add_clusters_str is None else gmond_add_clusters_str.split(',')
+gmond_apps = []
+
+for x in gmond_app_strs:
+  a,b = x.strip().split(':')
+  gmond_apps.append((a.strip(),b.strip()))
 
 if System.get_instance().os_family == "ubuntu":
   gmond_service_name = "ganglia-monitor"
@@ -103,12 +108,12 @@ has_nimbus_server = not len(nimbus_server_hosts) == 0
 has_supervisor_server = not len(supervisor_server_hosts) == 0
 
 ganglia_cluster_names = {
-  "jtnode_host": [("HDPJournalNode", 8654)],
+  "jn_hosts": [("HDPJournalNode", 8654)],
   "flume_hosts": [("HDPFlumeServer", 8655)],
   "hbase_rs_hosts": [("HDPHBaseRegionServer", 8656)],
   "nm_hosts": [("HDPNodeManager", 8657)],
   "mapred_tt_hosts": [("HDPTaskTracker", 8658)],
-  "slave_hosts": [("HDPDataNode", 8659), ("HDPSlaves", 8660)],
+  "slave_hosts": [("HDPDataNode", 8659)],
   "namenode_host": [("HDPNameNode", 8661)],
   "jtnode_host": [("HDPJobTracker", 8662)],
   "hbase_master_hosts": [("HDPHBaseMaster", 8663)],
@@ -116,12 +121,12 @@ ganglia_cluster_names = {
   "hs_host": [("HDPHistoryServer", 8666)],
   "nimbus_hosts": [("HDPNimbus", 8649)],
   "supervisor_hosts": [("HDPSupervisor", 8650)],
-  "Application1": [("Application1", 8667)],
-  "Application2": [("Application2", 8668)],
-  "Application3": [("Application3", 8669)]
+  "ReservedPort1": [("ReservedPort1", 8667)],
+  "ReservedPort2": [("ReservedPort2", 8668)],
+  "ReservedPort3": [("ReservedPort3", 8669)]
 }
 
-ganglia_clusters = []
+ganglia_clusters = [("HDPSlaves", 8660)]
 
 for key in ganglia_cluster_names:
   property_name = format("/clusterHostInfo/{key}")
@@ -129,10 +134,10 @@ for key in ganglia_cluster_names:
   if not len(hosts) == 0:
     for x in ganglia_cluster_names[key]:
       ganglia_clusters.append(x)
+
 if len(gmond_apps) > 0:
   for gmond_app in gmond_apps:
-    for x in ganglia_cluster_names[gmond_app]:
-      ganglia_clusters.append(x)
+    ganglia_clusters.append(gmond_app)
 
 ganglia_apache_config_file = "/etc/apache2/conf.d/ganglia.conf"
 ganglia_web_path="/var/www/html/ganglia"
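
The additional_clusters parsing above turns a "name:port,name:port" string
into (name, port) tuples; a self-contained run over a sample value (the
cluster names are hypothetical):

    gmond_add_clusters_str = "AppCluster1:8701, AppCluster2:8702"  # sample
    gmond_apps = []
    for x in gmond_add_clusters_str.split(','):
        name, port = x.strip().split(':')
        gmond_apps.append((name.strip(), port.strip()))
    print(gmond_apps)  # -> [('AppCluster1', '8701'), ('AppCluster2', '8702')]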

http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HBASE/configuration/hbase-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HBASE/configuration/hbase-site.xml b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HBASE/configuration/hbase-site.xml
index a0ca6d7..84900d1 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HBASE/configuration/hbase-site.xml
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HBASE/configuration/hbase-site.xml
@@ -222,45 +222,6 @@
     </description>
   </property>
 
-  <!-- The following properties configure authentication information for
-       HBase processes when using Kerberos security.  There are no default
-       values, included here for documentation purposes -->
-  <property>
-    <name>hbase.master.keytab.file</name>
-    <value>/etc/security/keytabs/hbase.service.keytab</value>
-    <description>Full path to the kerberos keytab file to use for logging in
-    the configured HMaster server principal.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.kerberos.principal</name>
-    <value>hbase/_HOST@EXAMPLE.COM</value>
-    <description>Ex. "hbase/_HOST@EXAMPLE.COM".  The kerberos principal name
-    that should be used to run the HMaster process.  The principal name should
-    be in the form: user/hostname@DOMAIN.  If "_HOST" is used as the hostname
-    portion, it will be replaced with the actual hostname of the running
-    instance.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.keytab.file</name>
-    <value>/etc/security/keytabs/hbase.service.keytab</value>
-    <description>Full path to the kerberos keytab file to use for logging in
-    the configured HRegionServer server principal.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.kerberos.principal</name>
-    <value>hbase/_HOST@EXAMPLE.COM</value>
-    <description>Ex. "hbase/_HOST@EXAMPLE.COM".  The kerberos principal name
-    that should be used to run the HRegionServer process.  The principal name
-    should be in the form: user/hostname@DOMAIN.  If "_HOST" is used as the
-    hostname portion, it will be replaced with the actual hostname of the
-    running instance.  An entry for this principal must exist in the file
-    specified in hbase.regionserver.keytab.file
-    </description>
-  </property>
-
   <!-- Additional configuration specific to HBase security -->
   <property>
     <name>hbase.superuser</name>

http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HBASE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HBASE/metainfo.xml b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HBASE/metainfo.xml
index 3ac282f..c304772 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HBASE/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HBASE/metainfo.xml
@@ -24,7 +24,7 @@
      <comment>Non-relational distributed database and centralized service for configuration management &amp;
         synchronization
       </comment>
-      <version>0.98.2.686</version>
+      <version>0.98.2.687</version>
       <components>
         <component>
           <name>HBASE_MASTER</name>

http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HBASE/package/files/hbaseSmokeVerify.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HBASE/package/files/hbaseSmokeVerify.sh b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HBASE/package/files/hbaseSmokeVerify.sh
index eedffd3..5c320c0 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HBASE/package/files/hbaseSmokeVerify.sh
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HBASE/package/files/hbaseSmokeVerify.sh
@@ -21,7 +21,8 @@
 #
 conf_dir=$1
 data=$2
-echo "scan 'ambarismoketest'" | hbase --config $conf_dir shell > 
/tmp/hbase_chk_verify
+hbase_cmd=$3
+echo "scan 'ambarismoketest'" | $hbase_cmd --config $conf_dir shell > 
/tmp/hbase_chk_verify
 cat /tmp/hbase_chk_verify
 echo "Looking for $data"
 grep -q $data /tmp/hbase_chk_verify

http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HBASE/package/scripts/hbase_decommission.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HBASE/package/scripts/hbase_decommission.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HBASE/package/scripts/hbase_decommission.py
index 4f7ed9e..a623927 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HBASE/package/scripts/hbase_decommission.py
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HBASE/package/scripts/hbase_decommission.py
@@ -33,42 +33,42 @@ def hbase_decommission(env):
   )
   
   if params.hbase_excluded_hosts and params.hbase_excluded_hosts.split(","):
+    hosts = params.hbase_excluded_hosts.split(",")
+  elif params.hbase_included_hosts and params.hbase_included_hosts.split(","):
+    hosts = params.hbase_included_hosts.split(",")
 
-    if params.hbase_drain_only == 'true':
-      hosts = params.hbase_excluded_hosts.split(",")
-      for host in hosts:
-        if host:
-          regiondrainer_cmd = format(
-            "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} org.jruby.Main 
{region_drainer} remove {host}")
-          Execute(regiondrainer_cmd,
-                  user=params.hbase_user,
-                  logoutput=True
-          )
-          pass
-      pass
-
-    else:
+  if params.hbase_drain_only:
+    for host in hosts:
+      if host:
+        regiondrainer_cmd = format(
+          "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} org.jruby.Main 
{region_drainer} remove {host}")
+        Execute(regiondrainer_cmd,
+                user=params.hbase_user,
+                logoutput=True
+        )
+        pass
+    pass
 
-      hosts = params.hbase_excluded_hosts.split(",")
-      for host in hosts:
-        if host:
-          regiondrainer_cmd = format(
-            "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} org.jruby.Main 
{region_drainer} add {host}")
-          regionmover_cmd = format(
-            "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} org.jruby.Main 
{region_mover} unload {host}")
+  else:
+    for host in hosts:
+      if host:
+        regiondrainer_cmd = format(
+          "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} org.jruby.Main 
{region_drainer} add {host}")
+        regionmover_cmd = format(
+          "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} org.jruby.Main 
{region_mover} unload {host}")
 
-          Execute(regiondrainer_cmd,
-                  user=params.hbase_user,
-                  logoutput=True
-          )
+        Execute(regiondrainer_cmd,
+                user=params.hbase_user,
+                logoutput=True
+        )
 
-          Execute(regionmover_cmd,
-                  user=params.hbase_user,
-                  logoutput=True
-          )
-        pass
+        Execute(regionmover_cmd,
+                user=params.hbase_user,
+                logoutput=True
+        )
       pass
     pass
-
+  pass
+  
 
   pass

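The rewritten hbase_decommission picks the host list once (excluded hosts
take precedence over included hosts) and only then branches on drain-only
mode. A condensed sketch of the new control flow, with Execute and
resource_management's format() stubbed out; the host strings are
hypothetical:

    def decommission_plan(excluded, included, drain_only):
      # Return the region_drainer/region_mover invocations the script would run.
      hosts = (excluded or included or "").split(",")
      cmds = []
      for host in hosts:
        if host:
          if drain_only:
            cmds.append("region_drainer remove " + host)
          else:
            cmds.append("region_drainer add " + host)
            cmds.append("region_mover unload " + host)
      return cmds

    print(decommission_plan("host1,host2", None, drain_only=False))

Note also that hbase_drain_only is now read with a False fallback and tested
for truthiness (see the params.py hunk below) instead of being compared to
the string 'true'.
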
http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HBASE/package/scripts/hbase_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HBASE/package/scripts/hbase_service.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HBASE/package/scripts/hbase_service.py
index c0db5b5..4656b32 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HBASE/package/scripts/hbase_service.py
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HBASE/package/scripts/hbase_service.py
@@ -44,7 +44,7 @@ def hbase_service(
       Execute ( daemon_cmd,
         user = params.hbase_user,
         # BUGFIX: hbase regionserver sometimes hangs when nn is in safemode
-        timeout = 30,
+        timeout = 60,
         on_timeout = format("{no_op_test} && kill -9 `cat {pid_file}`")
       )
       

http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HBASE/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HBASE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HBASE/package/scripts/params.py
index 364649c..f368703 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HBASE/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HBASE/package/scripts/params.py
@@ -26,16 +26,31 @@ import status_params
 config = Script.get_config()
 exec_tmp_dir = Script.get_tmp_dir()
 
+#RPM versioning support
+rpm_version = default("/configurations/cluster-env/rpm_version", None)
+
+#hadoop params
+if rpm_version:
+  hadoop_bin_dir = format("/usr/bigtop/current/hadoop-client/bin")
+  daemon_script = format('/usr/bigtop/current/hbase-client/bin/hbase-daemon.sh')
+  region_mover = format('/usr/bigtop/current/hbase-client/bin/region_mover.rb')
+  region_drainer = format('/usr/bigtop/current/hbase-client/bin/draining_servers.rb')
+  hbase_cmd = format('/usr/bigtop/current/hbase-client/bin/hbase')
+else:
+  hadoop_bin_dir = "/usr/bin"
+  daemon_script = "/usr/lib/hbase/bin/hbase-daemon.sh"
+  region_mover = "/usr/lib/hbase/bin/region_mover.rb"
+  region_drainer = "/usr/lib/hbase/bin/draining_servers.rb"
+  hbase_cmd = "/usr/lib/hbase/bin/hbase"
+
+hadoop_conf_dir = "/etc/hadoop/conf"
 hbase_conf_dir = "/etc/hbase/conf"
-daemon_script = "/usr/lib/hbase/bin/hbase-daemon.sh"
-region_mover = "/usr/lib/hbase/bin/region_mover.rb"
-region_drainer = "/usr/lib/hbase/bin/draining_servers.rb"
-hbase_cmd = "/usr/lib/hbase/bin/hbase"
 hbase_excluded_hosts = config['commandParams']['excluded_hosts']
-hbase_drain_only = config['commandParams']['mark_draining_only']
+hbase_drain_only = default("/commandParams/mark_draining_only",False)
 hbase_included_hosts = config['commandParams']['included_hosts']
 
 hbase_user = status_params.hbase_user
+hbase_principal_name = config['configurations']['hbase-env']['hbase_principal_name']
 smokeuser = config['configurations']['cluster-env']['smokeuser']
 _authentication = config['configurations']['core-site']['hadoop.security.authentication']
 security_enabled = config['configurations']['cluster-env']['security_enabled']
@@ -72,7 +87,7 @@ if 'slave_hosts' in config['clusterHostInfo']:
   rs_hosts = default('/clusterHostInfo/hbase_rs_hosts', '/clusterHostInfo/slave_hosts') #if hbase_rs_hosts not given it is assumed that region servers on same nodes as slaves
 else:
   rs_hosts = default('/clusterHostInfo/hbase_rs_hosts', '/clusterHostInfo/all_hosts')
-  
+
 smoke_test_user = config['configurations']['cluster-env']['smokeuser']
 smokeuser_permissions = "RWXCA"
 service_check_data = functions.get_unique_id_and_date()
@@ -89,7 +104,7 @@ smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
 hbase_user_keytab = config['configurations']['hbase-env']['hbase_user_keytab']
 kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 if security_enabled:
-  kinit_cmd = format("{kinit_path_local} -kt {hbase_user_keytab} {hbase_user};")
+  kinit_cmd = format("{kinit_path_local} -kt {hbase_user_keytab} {hbase_principal_name};")
 else:
   kinit_cmd = ""
 
@@ -105,7 +120,6 @@ hbase_hdfs_root_dir = config['configurations']['hbase-site']['hbase.rootdir']
 hbase_staging_dir = "/apps/hbase/staging"
 #for create_hdfs_directory
 hostname = config["hostname"]
-hadoop_conf_dir = "/etc/hadoop/conf"
 hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
 hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
 hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
@@ -119,5 +133,6 @@ HdfsDirectory = functools.partial(
   hdfs_user=hdfs_user,
   security_enabled = security_enabled,
   keytab = hdfs_user_keytab,
-  kinit_path_local = kinit_path_local
+  kinit_path_local = kinit_path_local,
+  bin_dir = hadoop_bin_dir
 )

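The rpm_version switch at the top of this params.py is the pattern used
throughout the patch: when /configurations/cluster-env/rpm_version is set,
client binaries resolve under /usr/bigtop/current, otherwise the legacy
/usr/lib locations are used. A minimal standalone sketch of the selection;
the lookup dict is a hypothetical stand-in for Ambari's default() helper:

    _config = {"/configurations/cluster-env/rpm_version": "0.8"}  # hypothetical

    def default(key, fallback):
      # Stand-in for resource_management's default(): return the configured
      # value when the key is present, else the fallback.
      return _config.get(key, fallback)

    rpm_version = default("/configurations/cluster-env/rpm_version", None)

    if rpm_version:
      hbase_cmd = "/usr/bigtop/current/hbase-client/bin/hbase"
    else:
      hbase_cmd = "/usr/lib/hbase/bin/hbase"

    print(hbase_cmd)  # /usr/bigtop/current/hbase-client/bin/hbase

The same fallback style now reads mark_draining_only with a False default,
so a missing command parameter no longer raises a KeyError.
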
http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HBASE/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HBASE/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HBASE/package/scripts/service_check.py
index 8fb38f7..15a306b 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HBASE/package/scripts/service_check.py
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HBASE/package/scripts/service_check.py
@@ -44,7 +44,7 @@ class HbaseServiceCheck(Script):
     
     if params.security_enabled:    
       hbase_grant_premissions_file = format("{exec_tmp_dir}/hbase_grant_permissions.sh")
-      grantprivelegecmd = format("{kinit_cmd} hbase shell {hbase_grant_premissions_file}")
+      grantprivelegecmd = format("{kinit_cmd} {hbase_cmd} shell {hbase_grant_premissions_file}")
   
       File( hbase_grant_premissions_file,
         owner   = params.hbase_user,
@@ -57,8 +57,8 @@ class HbaseServiceCheck(Script):
         user = params.hbase_user,
       )
 
-    servicecheckcmd = format("{smokeuser_kinit_cmd} hbase --config {hbase_conf_dir} shell {hbase_servicecheck_file}")
-    smokeverifycmd = format("{smokeuser_kinit_cmd} {exec_tmp_dir}/hbaseSmokeVerify.sh {hbase_conf_dir} {service_check_data}")
+    servicecheckcmd = format("{smokeuser_kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} shell {hbase_servicecheck_file}")
+    smokeverifycmd = format("{smokeuser_kinit_cmd} {exec_tmp_dir}/hbaseSmokeVerify.sh {hbase_conf_dir} {service_check_data} {hbase_cmd}")
   
     Execute( servicecheckcmd,
       tries     = 3,

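With hbase_cmd threaded through, the rendered smoke-verify command now lines
up with the $3 parameter added to hbaseSmokeVerify.sh earlier in this patch.
A sketch of the substitution using plain str.format in place of
resource_management's format(); all sample values are hypothetical:

    smokeuser_kinit_cmd = ""                      # empty when security is off
    exec_tmp_dir = "/var/lib/ambari-agent/tmp"    # hypothetical tmp dir
    hbase_conf_dir = "/etc/hbase/conf"
    service_check_data = "id_20141118"            # hypothetical unique id
    hbase_cmd = "/usr/bigtop/current/hbase-client/bin/hbase"

    smokeverifycmd = "{0} {1}/hbaseSmokeVerify.sh {2} {3} {4}".format(
        smokeuser_kinit_cmd, exec_tmp_dir, hbase_conf_dir,
        service_check_data, hbase_cmd)
    print(smokeverifycmd)

Inside the shell script, $1, $2 and $3 become conf_dir, data and hbase_cmd
respectively.
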
http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/configuration/hadoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/configuration/hadoop-env.xml b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/configuration/hadoop-env.xml
index e776148..c6dd202 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/configuration/hadoop-env.xml
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/configuration/hadoop-env.xml
@@ -83,7 +83,12 @@
     <property-type>USER</property-type>
     <description>User to run HDFS as</description>
   </property>
-  
+  <property>
+    <name>dfs.datanode.data.dir.mount.file</name>
+    <value>/etc/hadoop/conf/dfs_data_dir_mount.hist</value>
+    <description>File path that contains the last known mount point for each data dir. This file is used to avoid creating a DFS data dir on the root drive (and filling it up) if a path was previously mounted on a drive.</description>
+  </property>
+
   <!-- hadoop-env.sh -->
   <property>
     <name>content</name>
@@ -199,9 +204,6 @@ export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
 
 #Mostly required for hadoop 2.0
 export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64
-
-#Hadoop logging options
-export HADOOP_ROOT_LOGGER={{hadoop_root_logger}}
     </value>
   </property>
   

http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/configuration/hdfs-site.xml
index dfe71db..e67ed9f 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/configuration/hdfs-site.xml
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/configuration/hdfs-site.xml
@@ -298,96 +298,12 @@
   </property>
 
   <property>
-    <name>dfs.namenode.kerberos.principal</name>
-    <value>nn/_HOST@EXAMPLE.COM</value>
-    <description>
-      Kerberos principal name for the NameNode
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.kerberos.principal</name>
-    <value>nn/_HOST@EXAMPLE.COM</value>
-    <description>
-      Kerberos principal name for the secondary NameNode.
-    </description>
-  </property>
-
-
-  <!--
-    This is KRB DOMAIN specific. The FQDN of the namenode has to be mentioned.
-  -->
-  <property>
-    <name>dfs.namenode.kerberos.https.principal</name>
-    <value>HTTP/_HOST@EXAMPLE.COM</value>
-    <description>The Kerberos principal for the host that the NameNode runs on.</description>
-
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.kerberos.https.principal</name>
-    <value>HTTP/_HOST@EXAMPLE.COM</value>
-    <description>The Kerberos principal for the host that the secondary NameNode runs on.</description>
-
-  </property>
-
-  <property>
     <!-- cluster variant -->
     <name>dfs.namenode.secondary.http-address</name>
     <value>localhost:50090</value>
     <description>Address of secondary namenode web server</description>
   </property>
 
-  <property>
-    <name>dfs.web.authentication.kerberos.principal</name>
-    <value>HTTP/_HOST@EXAMPLE.COM</value>
-    <description>
-      The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
-      The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
-      HTTP SPNEGO specification.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.web.authentication.kerberos.keytab</name>
-    <value>/etc/security/keytabs/spnego.service.keytab</value>
-    <description>
-      The Kerberos keytab file with the credentials for the
-      HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.kerberos.principal</name>
-    <value>dn/_HOST@EXAMPLE.COM</value>
-    <description>
-      The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.keytab.file</name>
-    <value>/etc/security/keytabs/nn.service.keytab</value>
-    <description>
-      Combined keytab file containing the namenode service and host principals.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.keytab.file</name>
-    <value>/etc/security/keytabs/nn.service.keytab</value>
-    <description>
-      Combined keytab file containing the namenode service and host principals.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.keytab.file</name>
-    <value>/etc/security/keytabs/dn.service.keytab</value>
-    <description>
-      The filename of the keytab file for the DataNode.
-    </description>
-  </property>
 
   <property>
     <name>dfs.namenode.https-address</name>
@@ -501,4 +417,14 @@
      When enabled, a recovery of any failed directory is attempted during checkpoint.</description>
   </property>
 
+  <property>
+    <name>dfs.http.policy</name>
+    <value>HTTP_ONLY</value>
+    <description>
+      Decide if HTTPS(SSL) is supported on HDFS. This configures the HTTP endpoint for HDFS daemons:
+      The following values are supported: - HTTP_ONLY : Service is provided only on http - HTTPS_ONLY :
+      Service is provided only on https - HTTP_AND_HTTPS : Service is provided both on http and https
+    </description>
+  </property>
+
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/metainfo.xml b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/metainfo.xml
index a452474..90bb9f7 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/metainfo.xml
@@ -22,7 +22,7 @@
       <name>HDFS</name>
       <displayName>HDFS</displayName>
       <comment>Apache Hadoop Distributed File System</comment>
-      <version>2.4.0.724</version>
+      <version>2.4.1.726</version>
 
       <components>
         <component>

http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/files/checkForFormat.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/files/checkForFormat.sh b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/files/checkForFormat.sh
index d22d901..be8c75f 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/files/checkForFormat.sh
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/files/checkForFormat.sh
@@ -24,6 +24,8 @@ export hdfs_user=$1
 shift
 export conf_dir=$1
 shift
+export bin_dir=$1
+shift
 export old_mark_dir=$1
 shift
 export mark_dir=$1
@@ -56,7 +58,7 @@ if [[ ! -d $mark_dir ]] ; then
   done
 
   if [[ $EXIT_CODE == 0 ]] ; then
-    su - ${hdfs_user} -c "yes Y | hadoop --config ${conf_dir} ${command}"
+    su -s /bin/bash - ${hdfs_user} -c "export PATH=$PATH:${bin_dir} ; yes Y | hadoop --config ${conf_dir} ${command}"
   else
     echo "ERROR: Namenode directory(s) is non empty. Will not format the namenode. List of non-empty namenode dirs ${list_of_non_empty_dirs}"
   fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/hdfs.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/hdfs.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/hdfs.py
index 6f24f8e..873aa15 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/hdfs.py
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/hdfs.py
@@ -65,6 +65,15 @@ def hdfs(name=None):
             group=params.user_group
   )
 
+  XmlConfig("core-site.xml",
+            conf_dir=params.hadoop_conf_dir,
+            configurations=params.config['configurations']['core-site'],
+            configuration_attributes=params.config['configuration_attributes']['core-site'],
+            owner=params.hdfs_user,
+            group=params.user_group,
+            mode=0644
+  )
+
   File(os.path.join(params.hadoop_conf_dir, 'slaves'),
        owner=tc_owner,
        content=Template("slaves.j2")

http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/hdfs_datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/hdfs_datanode.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/hdfs_datanode.py
index e38d9af..c93c6e4 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/hdfs_datanode.py
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/hdfs_datanode.py
@@ -18,25 +18,34 @@ limitations under the License.
 """
 
 from resource_management import *
+from resource_management.libraries.functions.dfs_datanode_helper import handle_dfs_data_dir
 from utils import service
 
+
+def create_dirs(data_dir, params):
+  """
+  :param data_dir: The directory to create
+  :param params: parameters
+  """
+  Directory(data_dir,
+            recursive=True,
+            mode=0755,
+            owner=params.hdfs_user,
+            group=params.user_group,
+            ignore_failures=True
+  )
+
+
 def datanode(action=None):
   import params
-
   if action == "configure":
     Directory(params.dfs_domain_socket_dir,
               recursive=True,
               mode=0751,
               owner=params.hdfs_user,
               group=params.user_group)
-    for data_dir in params.dfs_data_dir.split(","):
-      Directory(data_dir,
-                recursive=True,
-                mode=0755,
-                owner=params.hdfs_user,
-                group=params.user_group,
-                ignore_failures=True
-      )
+
+    handle_dfs_data_dir(create_dirs, params)
 
   elif action == "start" or action == "stop":
     service(

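datanode() no longer creates every entry of dfs.datanode.data.dir
unconditionally; it hands the create_dirs callback to handle_dfs_data_dir,
which works together with the dfs.datanode.data.dir.mount.file history added
to hadoop-env.xml so that a data dir is not silently recreated on the root
volume when its backing mount has disappeared. A simplified sketch of that
callback pattern -- not the real dfs_datanode_helper implementation, and the
last_mounts dict is a hypothetical stand-in for the .hist file:

    import os

    def get_mount_point(path):
      # Walk up until a mount point is reached ("/" at the latest).
      path = os.path.abspath(path)
      while not os.path.ismount(path):
        path = os.path.dirname(path)
      return path

    def handle_data_dirs(create_func, data_dirs, last_mounts):
      for data_dir in [d.strip() for d in data_dirs.split(",")]:
        mount = get_mount_point(data_dir)
        if last_mounts.get(data_dir, mount) == mount:
          create_func(data_dir)  # still on the expected mount: safe to create
        else:
          print("skipping %s: expected mount %s is gone"
                % (data_dir, last_mounts[data_dir]))

    handle_data_dirs(lambda d: print("would create", d),
                     "/grid/0/hdfs/data,/grid/1/hdfs/data",
                     last_mounts={})
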
http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/hdfs_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/hdfs_namenode.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/hdfs_namenode.py
index 3456441..5500b97 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/hdfs_namenode.py
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/hdfs_namenode.py
@@ -45,11 +45,11 @@ def namenode(action=None, do_format=True):
       create_log_dir=True
     )
     if params.dfs_ha_enabled:
-      dfs_check_nn_status_cmd = format("su - {hdfs_user} -c 'hdfs haadmin -getServiceState {namenode_id} | grep active > /dev/null'")
+      dfs_check_nn_status_cmd = format("su -s /bin/bash - {hdfs_user} -c 'export PATH=$PATH:{hadoop_bin_dir} ; hdfs --config {hadoop_conf_dir} haadmin -getServiceState {namenode_id} | grep active > /dev/null'")
     else:
       dfs_check_nn_status_cmd = None
 
-    namenode_safe_mode_off = format("su - {hdfs_user} -c 'hadoop dfsadmin -safemode get' | grep 'Safe mode is OFF'")
+    namenode_safe_mode_off = format("su -s /bin/bash - {hdfs_user} -c 'export PATH=$PATH:{hadoop_bin_dir} ; hadoop --config {hadoop_conf_dir} dfsadmin -safemode get' | grep 'Safe mode is OFF'")
 
     if params.security_enabled:
      Execute(format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name}"),
@@ -110,14 +110,16 @@ def format_namenode(force=None):
   if not params.dfs_ha_enabled:
     if force:
       ExecuteHadoop('namenode -format',
-                    kinit_override=True)
+                    kinit_override=True,
+                    bin_dir=params.hadoop_bin_dir,
+                    conf_dir=hadoop_conf_dir)
     else:
       File(format("{tmp_dir}/checkForFormat.sh"),
            content=StaticFile("checkForFormat.sh"),
            mode=0755)
       Execute(format(
-        "{tmp_dir}/checkForFormat.sh {hdfs_user} {hadoop_conf_dir} 
{old_mark_dir} "
-        "{mark_dir} {dfs_name_dir}"),
+        "{tmp_dir}/checkForFormat.sh {hdfs_user} {hadoop_conf_dir} "
+        "{hadoop_bin_dir} {old_mark_dir} {mark_dir} {dfs_name_dir}"),
               not_if=format("test -d {old_mark_dir} || test -d {mark_dir}"),
               path="/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin"
       )
@@ -154,4 +156,5 @@ def decommission():
   ExecuteHadoop(nn_refresh_cmd,
                 user=hdfs_user,
                 conf_dir=conf_dir,
-                kinit_override=True)
+                kinit_override=True,
+                bin_dir=params.hadoop_bin_dir)
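The recurring change in these namenode commands is the hardened su
invocation: -s /bin/bash forces a usable shell (presumably to cope with
service accounts whose login shell is non-interactive), and exporting PATH
makes the bigtop client bin directory visible, since /usr/bigtop/current/...
is not on the default PATH. A sketch of how such a command string is
assembled, using %-formatting in place of resource_management's format();
the values are hypothetical:

    hdfs_user = "hdfs"
    hadoop_bin_dir = "/usr/bigtop/current/hadoop-client/bin"
    hadoop_conf_dir = "/etc/hadoop/conf"

    namenode_safe_mode_off = (
        "su -s /bin/bash - %(user)s -c "
        "'export PATH=$PATH:%(bin)s ; "
        "hadoop --config %(conf)s dfsadmin -safemode get' "
        "| grep 'Safe mode is OFF'"
        % {"user": hdfs_user, "bin": hadoop_bin_dir, "conf": hadoop_conf_dir}
    )
    print(namenode_safe_mode_off)
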

http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/namenode.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/namenode.py
index 8dae3eb..a0b07aa 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/namenode.py
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/namenode.py
@@ -88,7 +88,7 @@ class NameNode(Script):
     
     
     def startRebalancingProcess(threshold):
-      rebalanceCommand = format('hadoop --config {hadoop_conf_dir} balancer -threshold {threshold}')
+      rebalanceCommand = format('export PATH=$PATH:{hadoop_bin_dir} ; hadoop --config {hadoop_conf_dir} balancer -threshold {threshold}')
       return ['su','-',params.hdfs_user,'-c', rebalanceCommand]
     
     command = startRebalancingProcess(threshold)
