http://git-wip-us.apache.org/repos/asf/ambari/blob/e7131fa3/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/TEZ/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/TEZ/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/TEZ/package/scripts/params.py
index 38bcacf..18712fa 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/TEZ/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/TEZ/package/scripts/params.py
@@ -29,4 +29,4 @@ hadoop_home = '/usr'
 java64_home = config['hostLevelParams']['java_home']
 
 tez_user = config['configurations']['tez-env']['tez_user']
-user_group = config['configurations']['hadoop-env']['user_group']
\ No newline at end of file
+user_group = config['configurations']['cluster-env']['user_group']
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/e7131fa3/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/scripts/params.py
index 500727c..0fdaf18 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/scripts/params.py
@@ -32,10 +32,9 @@ mapred_user = status_params.mapred_user
 yarn_user = status_params.yarn_user
 hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
 
-smokeuser = config['configurations']['hadoop-env']['smokeuser']
-_authentication = config['configurations']['core-site']['hadoop.security.authentication']
-security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
-smoke_user_keytab = config['configurations']['hadoop-env']['smokeuser_keytab']
+smokeuser = config['configurations']['cluster-env']['smokeuser']
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
 yarn_executor_container_group = config['configurations']['yarn-site']['yarn.nodemanager.linux-container-executor.group']
 kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 rm_hosts = config['clusterHostInfo']['rm_host']
@@ -91,7 +90,7 @@ yarn_job_summary_log = format("{yarn_log_dir_prefix}/{yarn_user}/hadoop-mapreduc
 mapred_bin = "/usr/lib/hadoop-mapreduce/sbin"
 yarn_bin = "/usr/lib/hadoop-yarn/sbin"
 
-user_group = config['configurations']['hadoop-env']['user_group']
+user_group = config['configurations']['cluster-env']['user_group']
 limits_conf_dir = "/etc/security/limits.d"
 hadoop_conf_dir = "/etc/hadoop/conf"
 yarn_container_bin = "/usr/lib/hadoop-yarn/bin"

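The params.py changes above all follow the same pattern: the scripts read their values out of the command JSON returned by Script.get_config(), and the cluster-wide keys (smokeuser, user_group, smokeuser_keytab, kerberos_domain) plus the security_enabled flag now live under cluster-env instead of hadoop-env, with security_enabled read directly rather than derived from core-site's hadoop.security.authentication. A minimal sketch of the new lookup pattern, assuming the resource_management Script/config shape shown in these diffs (illustration only, not part of the commit):

    from resource_management import Script

    config = Script.get_config()

    # cluster-wide values are now read from cluster-env
    security_enabled  = config['configurations']['cluster-env']['security_enabled']
    smokeuser         = config['configurations']['cluster-env']['smokeuser']
    smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
    user_group        = config['configurations']['cluster-env']['user_group']

    # service-specific users stay in their own *-env sections, e.g.
    hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
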
http://git-wip-us.apache.org/repos/asf/ambari/blob/e7131fa3/ambari-server/src/main/resources/stacks/HDP/2.1/services/FALCON/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1/services/FALCON/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.1/services/FALCON/package/scripts/params.py
index 1e8fce9..7a61c8a 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1/services/FALCON/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1/services/FALCON/package/scripts/params.py
@@ -25,9 +25,9 @@ config = Script.get_config()
 
 oozie_user = config['configurations']['oozie-env']['oozie_user']
 falcon_user = config['configurations']['falcon-env']['falcon_user']
-smoke_user =  config['configurations']['hadoop-env']['smokeuser']
+smoke_user =  config['configurations']['cluster-env']['smokeuser']
 
-user_group = config['configurations']['hadoop-env']['user_group']
+user_group = config['configurations']['cluster-env']['user_group']
 proxyuser_group =  config['configurations']['hadoop-env']['proxyuser_group']
 
 java_home = config['hostLevelParams']['java_home']
@@ -45,14 +45,13 @@ falcon_host = config['clusterHostInfo']['falcon_server_hosts'][0]
 falcon_port = config['configurations']['falcon-env']['falcon_port']
 falcon_runtime_properties = config['configurations']['falcon-runtime.properties']
 falcon_startup_properties = config['configurations']['falcon-startup.properties']
-smokeuser_keytab = config['configurations']['hadoop-env']['smokeuser_keytab']
+smokeuser_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
 falcon_env_sh_template = config['configurations']['falcon-env']['content']
 
 falcon_webapp_dir = '/var/lib/falcon/webapp'
 flacon_apps_dir = '/apps/falcon'
 #for create_hdfs_directory
-_authentication = config['configurations']['core-site']['hadoop.security.authentication']
-security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
+security_enabled = config['configurations']['cluster-env']['security_enabled']
 hostname = config["hostname"]
 hadoop_conf_dir = "/etc/hadoop/conf"
 hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']

http://git-wip-us.apache.org/repos/asf/ambari/blob/e7131fa3/ambari-server/src/main/resources/stacks/HDP/2.1/services/STORM/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1/services/STORM/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.1/services/STORM/package/scripts/params.py
index b4416f4..19668c7 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1/services/STORM/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1/services/STORM/package/scripts/params.py
@@ -29,7 +29,7 @@ log_dir = config['configurations']['storm-env']['storm_log_dir']
 pid_dir = status_params.pid_dir
 conf_dir = "/etc/storm/conf"
 local_dir = config['configurations']['storm-site']['storm.local.dir']
-user_group = config['configurations']['hadoop-env']['user_group']
+user_group = config['configurations']['cluster-env']['user_group']
 java64_home = config['hostLevelParams']['java_home']
 nimbus_host = config['configurations']['storm-site']['nimbus.host']
 nimbus_port = config['configurations']['storm-site']['nimbus.thrift.port']
@@ -49,12 +49,11 @@ if 'ganglia_server_host' in config['clusterHostInfo'] and \
 else:
   ganglia_installed = False
   
-_authentication = config['configurations']['core-site']['hadoop.security.authentication']
-security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
+security_enabled = config['configurations']['cluster-env']['security_enabled']
 
 if security_enabled:
   _hostname_lowercase = config['hostname'].lower()
-  _kerberos_domain = config['configurations']['hadoop-env']['kerberos_domain']
+  _kerberos_domain = config['configurations']['cluster-env']['kerberos_domain']
   _storm_principal_name = config['configurations']['storm-env']['storm_principal_name']
   storm_jaas_principal = _storm_principal_name.replace('_HOST',_hostname_lowercase)
   storm_keytab_path = config['configurations']['storm-env']['storm_keytab']

http://git-wip-us.apache.org/repos/asf/ambari/blob/e7131fa3/ambari-server/src/main/resources/stacks/HDP/2.1/services/TEZ/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1/services/TEZ/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.1/services/TEZ/package/scripts/params.py
index 58b836e..78d2ea2 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1/services/TEZ/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1/services/TEZ/package/scripts/params.py
@@ -29,5 +29,5 @@ hadoop_home = '/usr'
 java64_home = config['hostLevelParams']['java_home']
 
 tez_user = config['configurations']['tez-env']['tez_user']
-user_group = config['configurations']['hadoop-env']['user_group']
+user_group = config['configurations']['cluster-env']['user_group']
 tez_env_sh_template = config['configurations']['tez-env']['content']
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/e7131fa3/ambari-server/src/test/python/stacks/1.3.2/configs/default.hbasedecom.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.2/configs/default.hbasedecom.json b/ambari-server/src/test/python/stacks/1.3.2/configs/default.hbasedecom.json
index 57d1137..38e1032 100644
--- a/ambari-server/src/test/python/stacks/1.3.2/configs/default.hbasedecom.json
+++ b/ambari-server/src/test/python/stacks/1.3.2/configs/default.hbasedecom.json
@@ -351,21 +351,23 @@
             "hcat_log_dir": "/var/log/webhcat", 
             "hive_aux_jars_path": 
"/usr/lib/hcatalog/share/hcatalog/hcatalog-core.jar", 
             "hive_database": "New MySQL Database"
-        }, 
+        },
+        "cluster-env": {
+            "security_enabled": "false",
+            "ignore_groupsusers_create": "false",
+            "smokeuser": "ambari-qa",
+            "kerberos_domain": "EXAMPLE.COM",
+            "user_group": "hadoop"
+        },
         "hadoop-env": {
-            "security_enabled": "false", 
-            "namenode_opt_maxnewsize": "200m", 
-            "hdfs_log_dir_prefix": "/var/log/hadoop", 
-            "ignore_groupsusers_create": "false", 
+            "namenode_opt_maxnewsize": "200m",
+            "hdfs_log_dir_prefix": "/var/log/hadoop",
             "namenode_heapsize": "1024m", 
-            "namenode_opt_newsize": "200m", 
-            "kerberos_domain": "EXAMPLE.COM", 
+            "namenode_opt_newsize": "200m",
             "content": "\n# Set Hadoop-specific environment variables 
here.\n\n# The only required environment variable is JAVA_HOME.  All others 
are\n# optional.  When running a distributed configuration it is best to\n# set 
JAVA_HOME in this file, so that it is correctly defined on\n# remote 
nodes.\n\n# The java implementation to use.  Required.\nexport 
JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop 
Configuration Directory\n#TODO: if env var set that can cause problems\nexport 
HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}\n\n# Hadoop home 
directory\nexport HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}\n\n# this is 
different for HDP1 #\n# Path to jsvc required by secure HDP 2.0 datanode\n# 
export JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. 
Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport 
HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java 
runtime options.  Empty by defaul
 t.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# 
History server logs\nexport 
HADOOP_MAPRED_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# Command specific 
options appended to HADOOP_OPTS when specified\nexport 
HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log 
-XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} 
-Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc 
-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps 
-Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} 
-Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT 
${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_JOBTRACKER_OPTS=\"-server 
-XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log 
-XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} 
-Xloggc:{{hdfs_log_dir
 _prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails 
-XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} 
-Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT 
-Dhadoop.mapreduce.jobsummary.logger=INFO,JSA 
-Dmapred.log.dir=$HADOOP_MAPRED_LOG_DIR 
${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server 
-Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console 
-Dmapred.audit.logger=ERROR,console 
${HADOOP_TASKTRACKER_OPTS}\"\nHADOOP_DATANODE_OPTS=\"-Xmx{{dtnode_heapsize}} 
-Dhadoop.security.logger=ERROR,DRFAS 
${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server 
-Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport 
HADOOP_SECONDARYNAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 
-XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log 
-XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} 
-Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -v
 erbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps 
${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} 
-Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT 
${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple 
commands (fs, dfs, fsck, distcp etc)\nexport 
HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n# On secure 
datanodes, user to run the datanode as after dropping privileges\nexport 
HADOOP_SECURE_DN_USER={{hdfs_user}}\n\n# Extra ssh options.  Empty by 
default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o 
SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs 
by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# Where log 
files are stored in the secure data environment.\nexport 
HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# 
File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export 
HADOOP_SLAVES=${HAD
 OOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from. 
 Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# 
Seconds to sleep between slave commands.  Unset by default.  This\n# can be 
useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive 
faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# 
The directory where pid files are stored. /tmp by default.\nexport 
HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport 
HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# 
History server pid\nexport 
HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n#
 A string representing this instance of hadoop. $USER by default.\nexport 
HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes.  
See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from stan
 dard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql 
connector\nfor jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`\ndo\n  
JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by 
oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n 
 JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by 
nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport 
HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\n# 
Setting path to hdfs command line\nexport 
HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n#Mostly required for hadoop 
2.0\nexport 
JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64\n
    ", 
-            "hdfs_user": "hdfs", 
-            "user_group": "hadoop", 
+            "hdfs_user": "hdfs",
             "dtnode_heapsize": "1024m", 
-            "proxyuser_group": "users", 
-            "smokeuser": "ambari-qa", 
+            "proxyuser_group": "users",
             "hadoop_heapsize": "1024", 
             "hadoop_pid_dir_prefix": "/var/run/hadoop"
         }, 

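The test fixtures in this and the following configs/*.json diffs are updated the same way: a cluster-env section is added carrying security_enabled, ignore_groupsusers_create, smokeuser, kerberos_domain and user_group (plus smokeuser_keytab and kinit_path_local in the secured variants), and those keys are dropped from hadoop-env. A rough sanity check of the resulting fixture shape, using values visible in these diffs (sketch only, not part of the commit):

    import json

    # path taken from the next diff; any of the updated fixtures would do
    with open('ambari-server/src/test/python/stacks/1.3.2/configs/default.json') as f:
        configurations = json.load(f)['configurations']

    cluster_env = configurations['cluster-env']
    assert cluster_env['user_group'] == 'hadoop'
    assert cluster_env['smokeuser'] == 'ambari-qa'
    assert cluster_env['security_enabled'] == 'false'

    # hadoop-env no longer carries the relocated keys
    assert 'smokeuser' not in configurations['hadoop-env']
    assert 'user_group' not in configurations['hadoop-env']
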
http://git-wip-us.apache.org/repos/asf/ambari/blob/e7131fa3/ambari-server/src/test/python/stacks/1.3.2/configs/default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.2/configs/default.json b/ambari-server/src/test/python/stacks/1.3.2/configs/default.json
index 4a42faa..3554ba1 100644
--- a/ambari-server/src/test/python/stacks/1.3.2/configs/default.json
+++ b/ambari-server/src/test/python/stacks/1.3.2/configs/default.json
@@ -355,23 +355,26 @@
             "hive_database": "New MySQL Database"
         }, 
         "hadoop-env": {
-            "security_enabled": "false", 
             "namenode_opt_maxnewsize": "200m", 
-            "hdfs_log_dir_prefix": "/var/log/hadoop", 
-            "ignore_groupsusers_create": "false", 
+            "hdfs_log_dir_prefix": "/var/log/hadoop",
             "namenode_heapsize": "1024m", 
-            "namenode_opt_newsize": "200m", 
-            "kerberos_domain": "EXAMPLE.COM", 
+            "namenode_opt_newsize": "200m",
             "content": "\n# Set Hadoop-specific environment variables 
here.\n\n# The only required environment variable is JAVA_HOME.  All others 
are\n# optional.  When running a distributed configuration it is best to\n# set 
JAVA_HOME in this file, so that it is correctly defined on\n# remote 
nodes.\n\n# The java implementation to use.  Required.\nexport 
JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop 
Configuration Directory\n#TODO: if env var set that can cause problems\nexport 
HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}\n\n# Hadoop home 
directory\nexport HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}\n\n# this is 
different for HDP1 #\n# Path to jsvc required by secure HDP 2.0 datanode\n# 
export JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. 
Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport 
HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java 
runtime options.  Empty by defaul
 t.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# 
History server logs\nexport 
HADOOP_MAPRED_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# Command specific 
options appended to HADOOP_OPTS when specified\nexport 
HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log 
-XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} 
-Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc 
-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps 
-Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} 
-Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT 
${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_JOBTRACKER_OPTS=\"-server 
-XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log 
-XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} 
-Xloggc:{{hdfs_log_dir
 _prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails 
-XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} 
-Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT 
-Dhadoop.mapreduce.jobsummary.logger=INFO,JSA 
-Dmapred.log.dir=$HADOOP_MAPRED_LOG_DIR 
${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server 
-Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console 
-Dmapred.audit.logger=ERROR,console 
${HADOOP_TASKTRACKER_OPTS}\"\nHADOOP_DATANODE_OPTS=\"-Xmx{{dtnode_heapsize}} 
-Dhadoop.security.logger=ERROR,DRFAS 
${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server 
-Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport 
HADOOP_SECONDARYNAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 
-XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log 
-XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} 
-Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -v
 erbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps 
${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} 
-Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT 
${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple 
commands (fs, dfs, fsck, distcp etc)\nexport 
HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n# On secure 
datanodes, user to run the datanode as after dropping privileges\nexport 
HADOOP_SECURE_DN_USER={{hdfs_user}}\n\n# Extra ssh options.  Empty by 
default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o 
SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs 
by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# Where log 
files are stored in the secure data environment.\nexport 
HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# 
File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export 
HADOOP_SLAVES=${HAD
 OOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from. 
 Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# 
Seconds to sleep between slave commands.  Unset by default.  This\n# can be 
useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive 
faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# 
The directory where pid files are stored. /tmp by default.\nexport 
HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport 
HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# 
History server pid\nexport 
HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n#
 A string representing this instance of hadoop. $USER by default.\nexport 
HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes.  
See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from stan
 dard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql 
connector\nfor jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`\ndo\n  
JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by 
oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n 
 JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by 
nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport 
HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\n# 
Setting path to hdfs command line\nexport 
HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n#Mostly required for hadoop 
2.0\nexport 
JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64\n
    ", 
-            "hdfs_user": "hdfs", 
-            "user_group": "hadoop", 
+            "hdfs_user": "hdfs",
             "dtnode_heapsize": "1024m", 
-            "proxyuser_group": "users", 
-            "smokeuser": "ambari-qa", 
+            "proxyuser_group": "users",
             "hadoop_heapsize": "1024", 
             "hadoop_pid_dir_prefix": "/var/run/hadoop"
-        }, 
-        "hbase-env": {
+        },
+        "cluster-env": {
+            "security_enabled": "false",
+            "ignore_groupsusers_create": "false",
+            "smokeuser": "ambari-qa",
+            "kerberos_domain": "EXAMPLE.COM",
+            "user_group": "hadoop"
+        },
+
+      "hbase-env": {
             "hbase_pid_dir": "/var/run/hbase", 
             "hbase_user": "hbase", 
             "hbase_master_heapsize": "1024m", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/e7131fa3/ambari-server/src/test/python/stacks/1.3.2/configs/default.non_gmetad_host.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.2/configs/default.non_gmetad_host.json b/ambari-server/src/test/python/stacks/1.3.2/configs/default.non_gmetad_host.json
index 19bafac..d6a9754 100644
--- a/ambari-server/src/test/python/stacks/1.3.2/configs/default.non_gmetad_host.json
+++ b/ambari-server/src/test/python/stacks/1.3.2/configs/default.non_gmetad_host.json
@@ -351,21 +351,23 @@
             "hcat_log_dir": "/var/log/webhcat", 
             "hive_aux_jars_path": 
"/usr/lib/hcatalog/share/hcatalog/hcatalog-core.jar", 
             "hive_database": "New MySQL Database"
-        }, 
+        },
+        "cluster-env": {
+            "security_enabled": "false",
+            "ignore_groupsusers_create": "false",
+            "smokeuser": "ambari-qa",
+            "kerberos_domain": "EXAMPLE.COM",
+            "user_group": "hadoop"
+        },
         "hadoop-env": {
-            "security_enabled": "false", 
             "namenode_opt_maxnewsize": "200m", 
-            "hdfs_log_dir_prefix": "/var/log/hadoop", 
-            "ignore_groupsusers_create": "false", 
+            "hdfs_log_dir_prefix": "/var/log/hadoop",
             "namenode_heapsize": "1024m", 
-            "namenode_opt_newsize": "200m", 
-            "kerberos_domain": "EXAMPLE.COM", 
+            "namenode_opt_newsize": "200m",
             "content": "\n# Set Hadoop-specific environment variables 
here.\n\n# The only required environment variable is JAVA_HOME.  All others 
are\n# optional.  When running a distributed configuration it is best to\n# set 
JAVA_HOME in this file, so that it is correctly defined on\n# remote 
nodes.\n\n# The java implementation to use.  Required.\nexport 
JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop 
Configuration Directory\n#TODO: if env var set that can cause problems\nexport 
HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}\n\n# Hadoop home 
directory\nexport HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}\n\n# this is 
different for HDP1 #\n# Path to jsvc required by secure HDP 2.0 datanode\n# 
export JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. 
Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport 
HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java 
runtime options.  Empty by defaul
 t.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# 
History server logs\nexport 
HADOOP_MAPRED_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# Command specific 
options appended to HADOOP_OPTS when specified\nexport 
HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log 
-XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} 
-Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc 
-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps 
-Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} 
-Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT 
${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_JOBTRACKER_OPTS=\"-server 
-XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log 
-XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} 
-Xloggc:{{hdfs_log_dir
 _prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails 
-XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} 
-Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT 
-Dhadoop.mapreduce.jobsummary.logger=INFO,JSA 
-Dmapred.log.dir=$HADOOP_MAPRED_LOG_DIR 
${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server 
-Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console 
-Dmapred.audit.logger=ERROR,console 
${HADOOP_TASKTRACKER_OPTS}\"\nHADOOP_DATANODE_OPTS=\"-Xmx{{dtnode_heapsize}} 
-Dhadoop.security.logger=ERROR,DRFAS 
${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server 
-Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport 
HADOOP_SECONDARYNAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 
-XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log 
-XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} 
-Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -v
 erbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps 
${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} 
-Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT 
${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple 
commands (fs, dfs, fsck, distcp etc)\nexport 
HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n# On secure 
datanodes, user to run the datanode as after dropping privileges\nexport 
HADOOP_SECURE_DN_USER={{hdfs_user}}\n\n# Extra ssh options.  Empty by 
default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o 
SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs 
by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# Where log 
files are stored in the secure data environment.\nexport 
HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# 
File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export 
HADOOP_SLAVES=${HAD
 OOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from. 
 Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# 
Seconds to sleep between slave commands.  Unset by default.  This\n# can be 
useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive 
faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# 
The directory where pid files are stored. /tmp by default.\nexport 
HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport 
HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# 
History server pid\nexport 
HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n#
 A string representing this instance of hadoop. $USER by default.\nexport 
HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes.  
See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from stan
 dard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql 
connector\nfor jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`\ndo\n  
JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by 
oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n 
 JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by 
nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport 
HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\n# 
Setting path to hdfs command line\nexport 
HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n#Mostly required for hadoop 
2.0\nexport 
JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64\n
    ", 
-            "hdfs_user": "hdfs", 
-            "user_group": "hadoop", 
+            "hdfs_user": "hdfs",
             "dtnode_heapsize": "1024m", 
-            "proxyuser_group": "users", 
-            "smokeuser": "ambari-qa", 
+            "proxyuser_group": "users",
             "hadoop_heapsize": "1024", 
             "hadoop_pid_dir_prefix": "/var/run/hadoop"
         }, 

http://git-wip-us.apache.org/repos/asf/ambari/blob/e7131fa3/ambari-server/src/test/python/stacks/1.3.2/configs/default_client.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.2/configs/default_client.json b/ambari-server/src/test/python/stacks/1.3.2/configs/default_client.json
index 2221eee..ccf5a16 100644
--- a/ambari-server/src/test/python/stacks/1.3.2/configs/default_client.json
+++ b/ambari-server/src/test/python/stacks/1.3.2/configs/default_client.json
@@ -351,21 +351,23 @@
             "hcat_log_dir": "/var/log/webhcat", 
             "hive_aux_jars_path": 
"/usr/lib/hcatalog/share/hcatalog/hcatalog-core.jar", 
             "hive_database": "New MySQL Database"
-        }, 
+        },
+        "cluster-env": {
+            "security_enabled": "false",
+            "ignore_groupsusers_create": "false",
+            "smokeuser": "ambari-qa",
+            "kerberos_domain": "EXAMPLE.COM",
+            "user_group": "hadoop"
+        },
         "hadoop-env": {
-            "security_enabled": "false", 
-            "namenode_opt_maxnewsize": "200m", 
-            "hdfs_log_dir_prefix": "/var/log/hadoop", 
-            "ignore_groupsusers_create": "false", 
+            "namenode_opt_maxnewsize": "200m",
+            "hdfs_log_dir_prefix": "/var/log/hadoop",
             "namenode_heapsize": "1024m", 
-            "namenode_opt_newsize": "200m", 
-            "kerberos_domain": "EXAMPLE.COM", 
+            "namenode_opt_newsize": "200m",
             "content": "\n# Set Hadoop-specific environment variables 
here.\n\n# The only required environment variable is JAVA_HOME.  All others 
are\n# optional.  When running a distributed configuration it is best to\n# set 
JAVA_HOME in this file, so that it is correctly defined on\n# remote 
nodes.\n\n# The java implementation to use.  Required.\nexport 
JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop 
Configuration Directory\n#TODO: if env var set that can cause problems\nexport 
HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}\n\n# Hadoop home 
directory\nexport HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}\n\n# this is 
different for HDP1 #\n# Path to jsvc required by secure HDP 2.0 datanode\n# 
export JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. 
Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport 
HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java 
runtime options.  Empty by defaul
 t.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# 
History server logs\nexport 
HADOOP_MAPRED_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# Command specific 
options appended to HADOOP_OPTS when specified\nexport 
HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log 
-XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} 
-Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc 
-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps 
-Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} 
-Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT 
${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_JOBTRACKER_OPTS=\"-server 
-XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log 
-XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} 
-Xloggc:{{hdfs_log_dir
 _prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails 
-XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} 
-Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT 
-Dhadoop.mapreduce.jobsummary.logger=INFO,JSA 
-Dmapred.log.dir=$HADOOP_MAPRED_LOG_DIR 
${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server 
-Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console 
-Dmapred.audit.logger=ERROR,console 
${HADOOP_TASKTRACKER_OPTS}\"\nHADOOP_DATANODE_OPTS=\"-Xmx{{dtnode_heapsize}} 
-Dhadoop.security.logger=ERROR,DRFAS 
${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server 
-Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport 
HADOOP_SECONDARYNAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 
-XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log 
-XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} 
-Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -v
 erbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps 
${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} 
-Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT 
${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple 
commands (fs, dfs, fsck, distcp etc)\nexport 
HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n# On secure 
datanodes, user to run the datanode as after dropping privileges\nexport 
HADOOP_SECURE_DN_USER={{hdfs_user}}\n\n# Extra ssh options.  Empty by 
default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o 
SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs 
by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# Where log 
files are stored in the secure data environment.\nexport 
HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# 
File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export 
HADOOP_SLAVES=${HAD
 OOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from. 
 Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# 
Seconds to sleep between slave commands.  Unset by default.  This\n# can be 
useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive 
faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# 
The directory where pid files are stored. /tmp by default.\nexport 
HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport 
HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# 
History server pid\nexport 
HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n#
 A string representing this instance of hadoop. $USER by default.\nexport 
HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes.  
See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from stan
 dard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql 
connector\nfor jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`\ndo\n  
JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by 
oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n 
 JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by 
nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport 
HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\n# 
Setting path to hdfs command line\nexport 
HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n#Mostly required for hadoop 
2.0\nexport 
JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64\n
    ", 
-            "hdfs_user": "hdfs", 
-            "user_group": "hadoop", 
+            "hdfs_user": "hdfs",
             "dtnode_heapsize": "1024m", 
-            "proxyuser_group": "users", 
-            "smokeuser": "ambari-qa", 
+            "proxyuser_group": "users",
             "hadoop_heapsize": "1024", 
             "hadoop_pid_dir_prefix": "/var/run/hadoop"
         }, 

http://git-wip-us.apache.org/repos/asf/ambari/blob/e7131fa3/ambari-server/src/test/python/stacks/1.3.2/configs/secured.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.2/configs/secured.json b/ambari-server/src/test/python/stacks/1.3.2/configs/secured.json
index aea185a..6c03d80 100644
--- a/ambari-server/src/test/python/stacks/1.3.2/configs/secured.json
+++ b/ambari-server/src/test/python/stacks/1.3.2/configs/secured.json
@@ -531,25 +531,29 @@
             "hcat_log_dir": "/var/log/webhcat", 
             "hive_aux_jars_path": 
"/usr/lib/hcatalog/share/hcatalog/hcatalog-core.jar", 
             "hive_database": "New MySQL Database"
-        }, 
+        },
+        "cluster-env": {
+            "security_enabled": "true",
+            "ignore_groupsusers_create": "false",
+            "smokeuser": "ambari-qa",
+            "kerberos_domain": "EXAMPLE.COM",
+            "user_group": "hadoop",
+            "smokeuser_keytab": "/etc/security/keytabs/smokeuser.headless.keytab",
+            "kinit_path_local": "/usr/bin"
+         },
         "hadoop-env": {
-            "security_enabled": "true", 
-            "namenode_opt_maxnewsize": "200m", 
-            "hdfs_log_dir_prefix": "/var/log/hadoop", 
-            "ignore_groupsusers_create": "false", 
+            "namenode_opt_maxnewsize": "200m",
+            "hdfs_log_dir_prefix": "/var/log/hadoop",
             "namenode_heapsize": "1024m", 
-            "namenode_opt_newsize": "200m", 
-            "kerberos_domain": "EXAMPLE.COM", 
+            "namenode_opt_newsize": "200m",
             "content": "\n# Set Hadoop-specific environment variables 
here.\n\n# The only required environment variable is JAVA_HOME.  All others 
are\n# optional.  When running a distributed configuration it is best to\n# set 
JAVA_HOME in this file, so that it is correctly defined on\n# remote 
nodes.\n\n# The java implementation to use.  Required.\nexport 
JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop 
Configuration Directory\n#TODO: if env var set that can cause problems\nexport 
HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}\n\n# Hadoop home 
directory\nexport HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}\n\n# this is 
different for HDP1 #\n# Path to jsvc required by secure HDP 2.0 datanode\n# 
export JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. 
Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport 
HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java 
runtime options.  Empty by defaul
 t.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# 
History server logs\nexport 
HADOOP_MAPRED_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# Command specific 
options appended to HADOOP_OPTS when specified\nexport 
HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log 
-XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} 
-Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc 
-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps 
-Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} 
-Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT 
${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_JOBTRACKER_OPTS=\"-server 
-XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log 
-XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} 
-Xloggc:{{hdfs_log_dir
 _prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails 
-XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} 
-Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT 
-Dhadoop.mapreduce.jobsummary.logger=INFO,JSA 
-Dmapred.log.dir=$HADOOP_MAPRED_LOG_DIR 
${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server 
-Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console 
-Dmapred.audit.logger=ERROR,console 
${HADOOP_TASKTRACKER_OPTS}\"\nHADOOP_DATANODE_OPTS=\"-Xmx{{dtnode_heapsize}} 
-Dhadoop.security.logger=ERROR,DRFAS 
${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server 
-Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport 
HADOOP_SECONDARYNAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 
-XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log 
-XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} 
-Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -v
 erbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps 
${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} 
-Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT 
${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple 
commands (fs, dfs, fsck, distcp etc)\nexport 
HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n# On secure 
datanodes, user to run the datanode as after dropping privileges\nexport 
HADOOP_SECURE_DN_USER={{hdfs_user}}\n\n# Extra ssh options.  Empty by 
default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o 
SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs 
by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# Where log 
files are stored in the secure data environment.\nexport 
HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# 
File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export 
HADOOP_SLAVES=${HAD
 OOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from. 
 Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# 
Seconds to sleep between slave commands.  Unset by default.  This\n# can be 
useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive 
faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# 
The directory where pid files are stored. /tmp by default.\nexport 
HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport 
HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# 
History server pid\nexport 
HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n#
 A string representing this instance of hadoop. $USER by default.\nexport 
HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes.  
See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from stan
 dard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql 
connector\nfor jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`\ndo\n  
JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by 
oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n 
 JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by 
nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport 
HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\n# 
Setting path to hdfs command line\nexport 
HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n#Mostly required for hadoop 
2.0\nexport 
JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64\n
    ", 
             "hdfs_user": "hdfs",
             "hdfs_principal_name": "hdfs",
             "user_group": "hadoop", 
             "dtnode_heapsize": "1024m", 
-            "proxyuser_group": "users", 
-            "smokeuser": "ambari-qa", 
+            "proxyuser_group": "users",
             "hadoop_heapsize": "1024", 
             "hadoop_pid_dir_prefix": "/var/run/hadoop",
-            "smokeuser_keytab": "/etc/security/keytabs/smokeuser.headless.keytab",
             "hdfs_user_keytab": "/etc/security/keytabs/hdfs.headless.keytab"
         }, 
         "hbase-env": {

http://git-wip-us.apache.org/repos/asf/ambari/blob/e7131fa3/ambari-server/src/test/python/stacks/1.3.2/configs/secured_client.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.2/configs/secured_client.json b/ambari-server/src/test/python/stacks/1.3.2/configs/secured_client.json
index b1c0d85..c8e0e3b 100644
--- a/ambari-server/src/test/python/stacks/1.3.2/configs/secured_client.json
+++ b/ambari-server/src/test/python/stacks/1.3.2/configs/secured_client.json
@@ -531,25 +531,28 @@
             "hcat_log_dir": "/var/log/webhcat", 
             "hive_aux_jars_path": 
"/usr/lib/hcatalog/share/hcatalog/hcatalog-core.jar", 
             "hive_database": "New MySQL Database"
-        }, 
+        },
+        "cluster-env": {
+            "security_enabled": "true",
+            "ignore_groupsusers_create": "false",
+            "smokeuser": "ambari-qa",
+            "kerberos_domain": "EXAMPLE.COM",
+            "user_group": "hadoop",
+            "smokeuser_keytab": "/etc/security/keytabs/smokeuser.headless.keytab",
+            "kinit_path_local": "/usr/bin"
+        },
         "hadoop-env": {
-            "security_enabled": "true", 
-            "namenode_opt_maxnewsize": "200m", 
+            "namenode_opt_maxnewsize": "200m",
             "hdfs_log_dir_prefix": "/var/log/hadoop", 
-            "ignore_groupsusers_create": "false", 
-            "namenode_heapsize": "1024m", 
-            "namenode_opt_newsize": "200m", 
-            "kerberos_domain": "EXAMPLE.COM", 
+            "namenode_heapsize": "1024m",
+            "namenode_opt_newsize": "200m",
             "content": "\n# Set Hadoop-specific environment variables 
here.\n\n# The only required environment variable is JAVA_HOME.  All others 
are\n# optional.  When running a distributed configuration it is best to\n# set 
JAVA_HOME in this file, so that it is correctly defined on\n# remote 
nodes.\n\n# The java implementation to use.  Required.\nexport 
JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop 
Configuration Directory\n#TODO: if env var set that can cause problems\nexport 
HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}\n\n# Hadoop home 
directory\nexport HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}\n\n# this is 
different for HDP1 #\n# Path to jsvc required by secure HDP 2.0 datanode\n# 
export JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. 
Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport 
HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java 
runtime options.  Empty by defaul
 t.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# 
History server logs\nexport 
HADOOP_MAPRED_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# Command specific 
options appended to HADOOP_OPTS when specified\nexport 
HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log 
-XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} 
-Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc 
-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps 
-Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} 
-Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT 
${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_JOBTRACKER_OPTS=\"-server 
-XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log 
-XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} 
-Xloggc:{{hdfs_log_dir
 _prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails 
-XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} 
-Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT 
-Dhadoop.mapreduce.jobsummary.logger=INFO,JSA 
-Dmapred.log.dir=$HADOOP_MAPRED_LOG_DIR 
${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server 
-Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console 
-Dmapred.audit.logger=ERROR,console 
${HADOOP_TASKTRACKER_OPTS}\"\nHADOOP_DATANODE_OPTS=\"-Xmx{{dtnode_heapsize}} 
-Dhadoop.security.logger=ERROR,DRFAS 
${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server 
-Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport 
HADOOP_SECONDARYNAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 
-XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log 
-XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} 
-Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -v
 erbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps 
${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} 
-Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT 
${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple 
commands (fs, dfs, fsck, distcp etc)\nexport 
HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n# On secure 
datanodes, user to run the datanode as after dropping privileges\nexport 
HADOOP_SECURE_DN_USER={{hdfs_user}}\n\n# Extra ssh options.  Empty by 
default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o 
SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs 
by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# Where log 
files are stored in the secure data environment.\nexport 
HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# 
File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export 
HADOOP_SLAVES=${HAD
 OOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from. 
 Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# 
Seconds to sleep between slave commands.  Unset by default.  This\n# can be 
useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive 
faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# 
The directory where pid files are stored. /tmp by default.\nexport 
HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport 
HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# 
History server pid\nexport 
HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n#
 A string representing this instance of hadoop. $USER by default.\nexport 
HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes.  
See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from stan
 dard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql 
connector\nfor jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`\ndo\n  
JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by 
oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n 
 JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by 
nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport 
HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\n# 
Setting path to hdfs command line\nexport 
HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n#Mostly required for hadoop 
2.0\nexport 
JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64\n
    ", 
             "hdfs_user": "hdfs",
             "hdfs_principal_name": "hdfs",
-            "user_group": "hadoop", 
             "dtnode_heapsize": "1024m", 
-            "proxyuser_group": "users", 
-            "smokeuser": "ambari-qa", 
+            "proxyuser_group": "users",
             "hadoop_heapsize": "1024", 
             "hadoop_pid_dir_prefix": "/var/run/hadoop",
-            "smokeuser_keytab": "/etc/security/keytabs/smokeuser.headless.keytab",
             "hdfs_user_keytab": "/etc/security/keytabs/hdfs.headless.keytab"
         }, 
         "hbase-env": {

http://git-wip-us.apache.org/repos/asf/ambari/blob/e7131fa3/ambari-server/src/test/python/stacks/2.0.6/configs/default.hbasedecom.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default.hbasedecom.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default.hbasedecom.json
index b3bf5b8..1727d66 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default.hbasedecom.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default.hbasedecom.json
@@ -423,21 +423,23 @@
             "resourcemanager_heapsize": "1024", 
             "yarn_log_dir_prefix": "/var/log/hadoop-yarn",
             "min_user_id": "1000"
-        }, 
+        },
+        "cluster-env": {
+            "security_enabled": "false",
+            "ignore_groupsusers_create": "false",
+            "smokeuser": "ambari-qa",
+            "kerberos_domain": "EXAMPLE.COM",
+            "user_group": "hadoop"
+        },
         "hadoop-env": {
-            "security_enabled": "false", 
-            "namenode_opt_maxnewsize": "200m", 
-            "hdfs_log_dir_prefix": "/var/log/hadoop", 
-            "ignore_groupsusers_create": "false", 
+            "namenode_opt_maxnewsize": "200m",
+            "hdfs_log_dir_prefix": "/var/log/hadoop",
             "namenode_heapsize": "1024m", 
-            "namenode_opt_newsize": "200m", 
-            "kerberos_domain": "EXAMPLE.COM", 
+            "namenode_opt_newsize": "200m",
             "content": "\n# Set Hadoop-specific environment variables 
here.\n\n# The only required environment variable is JAVA_HOME.  All others 
are\n# optional.  When running a distributed configuration it is best to\n# set 
JAVA_HOME in this file, so that it is correctly defined on\n# remote 
nodes.\n\n# The java implementation to use.  Required.\nexport 
JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home 
directory\nexport HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}\n\n# Hadoop 
Configuration Directory\n#TODO: if env var set that can cause problems\nexport 
HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}\n\n{# this is different 
for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport 
JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. 
Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport 
HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java 
runtime options.  Empty by defaul
 t.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# 
Command specific options appended to HADOOP_OPTS when specified\nexport 
HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log 
-XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} 
-Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc 
-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps 
-Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} 
-Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT 
${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server 
-XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log 
-XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} 
-Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc 
-XX:+PrintGCDetails -XX:+PrintGCTime
 Stamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} 
-Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT 
-Dhadoop.mapreduce.jobsummary.logger=INFO,JSA 
${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server 
-Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console 
-Dmapred.audit.logger=ERROR,console 
${HADOOP_TASKTRACKER_OPTS}\"\nHADOOP_DATANODE_OPTS=\"-Xmx{{dtnode_heapsize}} 
-Dhadoop.security.logger=ERROR,DRFAS 
${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server 
-Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport 
HADOOP_SECONDARYNAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 
-XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log 
-XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} 
-Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc 
-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps 
${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize
 }} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT 
${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple 
commands (fs, dfs, fsck, distcp etc)\nexport 
HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n# On secure 
datanodes, user to run the datanode as after dropping privileges\nexport 
HADOOP_SECURE_DN_USER={{hdfs_user}}\n\n# Extra ssh options.  Empty by 
default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o 
SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs 
by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History 
server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# 
Where log files are stored in the secure data environment.\nexport 
HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# 
File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export 
HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where ha
 doop code should be rsync'd from.  Unset by default.\n# export 
HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave 
commands.  Unset by default.  This\n# can be useful in large clusters, where, 
e.g., slave rsyncs can\n# otherwise arrive faster than the master can service 
them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are 
stored. /tmp by default.\nexport 
HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport 
HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# 
History server pid\nexport 
HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n#
 A string representing this instance of hadoop. $USER by default.\nexport 
HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes.  
See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard 
classpath\nJAVA_JDBC_LIBS=\"\"\n#Add lib
 raries required by mysql connector\nfor jarFile in `ls /usr/share/java/*mysql* 
2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add 
libraries required by oracle connector\nfor jarFile in `ls 
/usr/share/java/*ojdbc* 2>/dev/null`\ndo\n  
JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by 
nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport 
HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\nif [ 
-d \"/usr/lib/tez\" ]; then\n  export 
HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf\nfi\n\n#
 Setting path to hdfs command line\nexport 
HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n#Mostly required for hadoop 
2.0\nexport 
JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64",
 
-            "hdfs_user": "hdfs", 
-            "user_group": "hadoop", 
+            "hdfs_user": "hdfs",
             "dtnode_heapsize": "1024m", 
-            "proxyuser_group": "users", 
-            "smokeuser": "ambari-qa", 
+            "proxyuser_group": "users",
             "hadoop_heapsize": "1024", 
             "hadoop_pid_dir_prefix": "/var/run/hadoop"
         },

http://git-wip-us.apache.org/repos/asf/ambari/blob/e7131fa3/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default.json 
b/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
index 0a48d49..9cdf773 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
@@ -426,19 +426,14 @@
             "min_user_id": "1000"
         }, 
         "hadoop-env": {
-            "security_enabled": "false", 
             "namenode_opt_maxnewsize": "200m", 
-            "hdfs_log_dir_prefix": "/var/log/hadoop", 
-            "ignore_groupsusers_create": "false", 
+            "hdfs_log_dir_prefix": "/var/log/hadoop",
             "namenode_heapsize": "1024m", 
-            "namenode_opt_newsize": "200m", 
-            "kerberos_domain": "EXAMPLE.COM", 
+            "namenode_opt_newsize": "200m",
             "content": "\n# Set Hadoop-specific environment variables 
here.\n\n# The only required environment variable is JAVA_HOME.  All others 
are\n# optional.  When running a distributed configuration it is best to\n# set 
JAVA_HOME in this file, so that it is correctly defined on\n# remote 
nodes.\n\n# The java implementation to use.  Required.\nexport 
JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home 
directory\nexport HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}\n\n# Hadoop 
Configuration Directory\n#TODO: if env var set that can cause problems\nexport 
HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}\n\n{# this is different 
for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport 
JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. 
Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport 
HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java 
runtime options.  Empty by defaul
 t.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# 
Command specific options appended to HADOOP_OPTS when specified\nexport 
HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log 
-XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} 
-Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc 
-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps 
-Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} 
-Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT 
${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server 
-XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log 
-XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} 
-Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc 
-XX:+PrintGCDetails -XX:+PrintGCTime
 Stamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} 
-Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT 
-Dhadoop.mapreduce.jobsummary.logger=INFO,JSA 
${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server 
-Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console 
-Dmapred.audit.logger=ERROR,console 
${HADOOP_TASKTRACKER_OPTS}\"\nHADOOP_DATANODE_OPTS=\"-Xmx{{dtnode_heapsize}} 
-Dhadoop.security.logger=ERROR,DRFAS 
${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server 
-Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport 
HADOOP_SECONDARYNAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 
-XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log 
-XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} 
-Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc 
-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps 
${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize
 }} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT 
${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple 
commands (fs, dfs, fsck, distcp etc)\nexport 
HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n# On secure 
datanodes, user to run the datanode as after dropping privileges\nexport 
HADOOP_SECURE_DN_USER={{hdfs_user}}\n\n# Extra ssh options.  Empty by 
default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o 
SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs 
by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History 
server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# 
Where log files are stored in the secure data environment.\nexport 
HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# 
File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export 
HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where ha
 doop code should be rsync'd from.  Unset by default.\n# export 
HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave 
commands.  Unset by default.  This\n# can be useful in large clusters, where, 
e.g., slave rsyncs can\n# otherwise arrive faster than the master can service 
them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are 
stored. /tmp by default.\nexport 
HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport 
HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# 
History server pid\nexport 
HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n#
 A string representing this instance of hadoop. $USER by default.\nexport 
HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes.  
See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard 
classpath\nJAVA_JDBC_LIBS=\"\"\n#Add lib
 raries required by mysql connector\nfor jarFile in `ls /usr/share/java/*mysql* 
2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add 
libraries required by oracle connector\nfor jarFile in `ls 
/usr/share/java/*ojdbc* 2>/dev/null`\ndo\n  
JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by 
nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport 
HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\nif [ 
-d \"/usr/lib/tez\" ]; then\n  export 
HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf\nfi\n\n#
 Setting path to hdfs command line\nexport 
HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n#Mostly required for hadoop 
2.0\nexport 
JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64",
 
-            "hdfs_user": "hdfs", 
-            "user_group": "hadoop", 
+            "hdfs_user": "hdfs",
             "dtnode_heapsize": "1024m", 
-            "proxyuser_group": "users", 
-            "smokeuser": "ambari-qa", 
+            "proxyuser_group": "users",
             "hadoop_heapsize": "1024", 
             "hadoop_pid_dir_prefix": "/var/run/hadoop"
         },
@@ -458,7 +453,15 @@
             "hcat_log_dir": "/var/log/webhcat", 
             "hive_database": "New MySQL Database"
         },
-        "hbase-env": {
+      "cluster-env": {
+        "security_enabled": "false",
+        "ignore_groupsusers_create": "false",
+        "smokeuser": "ambari-qa",
+        "kerberos_domain": "EXAMPLE.COM",
+        "user_group": "hadoop"
+      },
+
+      "hbase-env": {
             "hbase_pid_dir": "/var/run/hbase", 
             "hbase_user": "hbase", 
             "hbase_master_heapsize": "1024m", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/e7131fa3/ambari-server/src/test/python/stacks/2.0.6/configs/default.non_gmetad_host.json
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/test/python/stacks/2.0.6/configs/default.non_gmetad_host.json
 
b/ambari-server/src/test/python/stacks/2.0.6/configs/default.non_gmetad_host.json
index af007cb..138cb80 100644
--- 
a/ambari-server/src/test/python/stacks/2.0.6/configs/default.non_gmetad_host.json
+++ 
b/ambari-server/src/test/python/stacks/2.0.6/configs/default.non_gmetad_host.json
@@ -417,21 +417,23 @@
             "resourcemanager_heapsize": "1024", 
             "yarn_log_dir_prefix": "/var/log/hadoop-yarn",
             "min_user_id": "1000"
-        }, 
+        },
+        "cluster-env": {
+          "security_enabled": "false",
+          "ignore_groupsusers_create": "false",
+          "smokeuser": "ambari-qa",
+          "kerberos_domain": "EXAMPLE.COM",
+          "user_group": "hadoop"
+        },
         "hadoop-env": {
-            "security_enabled": "false", 
-            "namenode_opt_maxnewsize": "200m", 
-            "hdfs_log_dir_prefix": "/var/log/hadoop", 
-            "ignore_groupsusers_create": "false", 
+            "namenode_opt_maxnewsize": "200m",
+            "hdfs_log_dir_prefix": "/var/log/hadoop",
             "namenode_heapsize": "1024m", 
-            "namenode_opt_newsize": "200m", 
-            "kerberos_domain": "EXAMPLE.COM", 
+            "namenode_opt_newsize": "200m",
             "content": "\n# Set Hadoop-specific environment variables 
here.\n\n# The only required environment variable is JAVA_HOME.  All others 
are\n# optional.  When running a distributed configuration it is best to\n# set 
JAVA_HOME in this file, so that it is correctly defined on\n# remote 
nodes.\n\n# The java implementation to use.  Required.\nexport 
JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home 
directory\nexport HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}\n\n# Hadoop 
Configuration Directory\n#TODO: if env var set that can cause problems\nexport 
HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}\n\n{# this is different 
for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport 
JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. 
Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport 
HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java 
runtime options.  Empty by defaul
 t.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# 
Command specific options appended to HADOOP_OPTS when specified\nexport 
HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log 
-XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} 
-Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc 
-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps 
-Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} 
-Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT 
${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server 
-XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log 
-XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} 
-Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc 
-XX:+PrintGCDetails -XX:+PrintGCTime
 Stamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} 
-Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT 
-Dhadoop.mapreduce.jobsummary.logger=INFO,JSA 
${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server 
-Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console 
-Dmapred.audit.logger=ERROR,console 
${HADOOP_TASKTRACKER_OPTS}\"\nHADOOP_DATANODE_OPTS=\"-Xmx{{dtnode_heapsize}} 
-Dhadoop.security.logger=ERROR,DRFAS 
${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server 
-Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport 
HADOOP_SECONDARYNAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 
-XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log 
-XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} 
-Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc 
-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps 
${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize
 }} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT 
${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple 
commands (fs, dfs, fsck, distcp etc)\nexport 
HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n# On secure 
datanodes, user to run the datanode as after dropping privileges\nexport 
HADOOP_SECURE_DN_USER={{hdfs_user}}\n\n# Extra ssh options.  Empty by 
default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o 
SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs 
by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History 
server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# 
Where log files are stored in the secure data environment.\nexport 
HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# 
File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export 
HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where ha
 doop code should be rsync'd from.  Unset by default.\n# export 
HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave 
commands.  Unset by default.  This\n# can be useful in large clusters, where, 
e.g., slave rsyncs can\n# otherwise arrive faster than the master can service 
them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are 
stored. /tmp by default.\nexport 
HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport 
HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# 
History server pid\nexport 
HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n#
 A string representing this instance of hadoop. $USER by default.\nexport 
HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes.  
See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard 
classpath\nJAVA_JDBC_LIBS=\"\"\n#Add lib
 raries required by mysql connector\nfor jarFile in `ls /usr/share/java/*mysql* 
2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add 
libraries required by oracle connector\nfor jarFile in `ls 
/usr/share/java/*ojdbc* 2>/dev/null`\ndo\n  
JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by 
nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport 
HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\nif [ 
-d \"/usr/lib/tez\" ]; then\n  export 
HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf\nfi\n\n#
 Setting path to hdfs command line\nexport 
HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n#Mostly required for hadoop 
2.0\nexport 
JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64",
 
-            "hdfs_user": "hdfs", 
-            "user_group": "hadoop", 
+            "hdfs_user": "hdfs",
             "dtnode_heapsize": "1024m", 
-            "proxyuser_group": "users", 
-            "smokeuser": "ambari-qa", 
+            "proxyuser_group": "users",
             "hadoop_heapsize": "1024", 
             "hadoop_pid_dir_prefix": "/var/run/hadoop"
         },

http://git-wip-us.apache.org/repos/asf/ambari/blob/e7131fa3/ambari-server/src/test/python/stacks/2.0.6/configs/default_client.json
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/test/python/stacks/2.0.6/configs/default_client.json 
b/ambari-server/src/test/python/stacks/2.0.6/configs/default_client.json
index 417946c..9103cdc 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_client.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_client.json
@@ -423,21 +423,23 @@
             "resourcemanager_heapsize": "1024", 
             "yarn_log_dir_prefix": "/var/log/hadoop-yarn",
             "min_user_id": "1000"
-        }, 
+        },
+        "cluster-env": {
+            "security_enabled": "false",
+            "ignore_groupsusers_create": "false",
+            "smokeuser": "ambari-qa",
+            "kerberos_domain": "EXAMPLE.COM",
+            "user_group": "hadoop"
+        },
         "hadoop-env": {
-            "security_enabled": "false", 
-            "namenode_opt_maxnewsize": "200m", 
-            "hdfs_log_dir_prefix": "/var/log/hadoop", 
-            "ignore_groupsusers_create": "false", 
+            "namenode_opt_maxnewsize": "200m",
+            "hdfs_log_dir_prefix": "/var/log/hadoop",
             "namenode_heapsize": "1024m", 
-            "namenode_opt_newsize": "200m", 
-            "kerberos_domain": "EXAMPLE.COM", 
+            "namenode_opt_newsize": "200m",
             "content": "\n# Set Hadoop-specific environment variables 
here.\n\n# The only required environment variable is JAVA_HOME.  All others 
are\n# optional.  When running a distributed configuration it is best to\n# set 
JAVA_HOME in this file, so that it is correctly defined on\n# remote 
nodes.\n\n# The java implementation to use.  Required.\nexport 
JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home 
directory\nexport HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}\n\n# Hadoop 
Configuration Directory\n#TODO: if env var set that can cause problems\nexport 
HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}\n\n{# this is different 
for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport 
JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. 
Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport 
HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java 
runtime options.  Empty by defaul
 t.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# 
Command specific options appended to HADOOP_OPTS when specified\nexport 
HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log 
-XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} 
-Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc 
-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps 
-Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} 
-Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT 
${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server 
-XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log 
-XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} 
-Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc 
-XX:+PrintGCDetails -XX:+PrintGCTime
 Stamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} 
-Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT 
-Dhadoop.mapreduce.jobsummary.logger=INFO,JSA 
${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server 
-Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console 
-Dmapred.audit.logger=ERROR,console 
${HADOOP_TASKTRACKER_OPTS}\"\nHADOOP_DATANODE_OPTS=\"-Xmx{{dtnode_heapsize}} 
-Dhadoop.security.logger=ERROR,DRFAS 
${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server 
-Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport 
HADOOP_SECONDARYNAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 
-XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log 
-XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} 
-Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc 
-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps 
${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize
 }} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT 
${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple 
commands (fs, dfs, fsck, distcp etc)\nexport 
HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n# On secure 
datanodes, user to run the datanode as after dropping privileges\nexport 
HADOOP_SECURE_DN_USER={{hdfs_user}}\n\n# Extra ssh options.  Empty by 
default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o 
SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs 
by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History 
server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# 
Where log files are stored in the secure data environment.\nexport 
HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# 
File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export 
HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where ha
 doop code should be rsync'd from.  Unset by default.\n# export 
HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave 
commands.  Unset by default.  This\n# can be useful in large clusters, where, 
e.g., slave rsyncs can\n# otherwise arrive faster than the master can service 
them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are 
stored. /tmp by default.\nexport 
HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport 
HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# 
History server pid\nexport 
HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n#
 A string representing this instance of hadoop. $USER by default.\nexport 
HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes.  
See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard 
classpath\nJAVA_JDBC_LIBS=\"\"\n#Add lib
 raries required by mysql connector\nfor jarFile in `ls /usr/share/java/*mysql* 
2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add 
libraries required by oracle connector\nfor jarFile in `ls 
/usr/share/java/*ojdbc* 2>/dev/null`\ndo\n  
JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by 
nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport 
HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\nif [ 
-d \"/usr/lib/tez\" ]; then\n  export 
HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf\nfi\n\n#
 Setting path to hdfs command line\nexport 
HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n#Mostly required for hadoop 
2.0\nexport 
JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64",
 
-            "hdfs_user": "hdfs", 
-            "user_group": "hadoop", 
+            "hdfs_user": "hdfs",
             "dtnode_heapsize": "1024m", 
-            "proxyuser_group": "users", 
-            "smokeuser": "ambari-qa", 
+            "proxyuser_group": "users",
             "hadoop_heapsize": "1024", 
             "hadoop_pid_dir_prefix": "/var/run/hadoop"
         },
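
For reference (not part of the patch): a minimal Python sketch of how the relocated keys would be read out of one of these test configs after the change. The file name is hypothetical, and the sketch assumes the usual layout of these JSONs, with the per-type settings nested under a top-level "configurations" key (the diff context above shows only the nested types).

import json

# Load one of the test configs touched by this patch (illustrative path).
with open("default_client.json") as fp:
    config = json.load(fp)

# Shared identity/security settings now live under "cluster-env"
# instead of "hadoop-env".
cluster_env = config["configurations"]["cluster-env"]
smokeuser = cluster_env["smokeuser"]                # "ambari-qa"
user_group = cluster_env["user_group"]              # "hadoop"
security_enabled = cluster_env["security_enabled"]  # "false"

# Service-specific values such as hdfs_user stay under "hadoop-env".
hdfs_user = config["configurations"]["hadoop-env"]["hdfs_user"]  # "hdfs"

print(smokeuser, user_group, security_enabled, hdfs_user)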
