Repository: ambari
Updated Branches:
  refs/heads/branch-2.1 3d57232de -> 0585b5af5


http://git-wip-us.apache.org/repos/asf/ambari/blob/0585b5af/ambari-server/src/test/python/stacks/2.3/common/sparkts-host.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/common/sparkts-host.json b/ambari-server/src/test/python/stacks/2.3/common/sparkts-host.json
new file mode 100644
index 0000000..3915321
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/2.3/common/sparkts-host.json
@@ -0,0 +1,220 @@
+{
+  "href" : 
"/api/v1/hosts?fields=Hosts/*&Hosts/host_name.in(jerryshao-sts-test-1.c.pramod-thangali.internal)",
+  "items" : [
+    {
+      "href" : "/api/v1/hosts/jerryshao-sts-test-1.c.pramod-thangali.internal",
+      "Hosts" : {
+        "cluster_name" : "test",
+        "cpu_count" : 2,
+        "desired_configs" : null,
+        "disk_info" : [
+          {
+            "available" : "5646396",
+            "device" : "/dev/sda1",
+            "used" : "4017508",
+            "percent" : "42%",
+            "size" : "10188088",
+            "type" : "ext4",
+            "mountpoint" : "/"
+          },
+          {
+            "available" : "3771720",
+            "device" : "tmpfs",
+            "used" : "0",
+            "percent" : "0%",
+            "size" : "3771720",
+            "type" : "tmpfs",
+            "mountpoint" : "/dev/shm"
+          }
+        ],
+        "host_health_report" : "",
+        "host_name" : "jerryshao-sts-test-1.c.pramod-thangali.internal",
+        "host_state" : "HEALTHY",
+        "host_status" : "HEALTHY",
+        "ip" : "10.240.0.122",
+        "last_agent_env" : {
+          "stackFoldersAndFiles" : [
+            {
+              "name" : "/etc/hadoop",
+              "type" : "directory"
+            },
+            {
+              "name" : "/etc/zookeeper",
+              "type" : "directory"
+            },
+            {
+              "name" : "/var/run/hadoop",
+              "type" : "directory"
+            },
+            {
+              "name" : "/var/run/zookeeper",
+              "type" : "directory"
+            },
+            {
+              "name" : "/var/run/hadoop-yarn",
+              "type" : "directory"
+            },
+            {
+              "name" : "/var/run/hadoop-mapreduce",
+              "type" : "directory"
+            },
+            {
+              "name" : "/var/log/hadoop",
+              "type" : "directory"
+            },
+            {
+              "name" : "/var/log/zookeeper",
+              "type" : "directory"
+            },
+            {
+              "name" : "/var/log/hadoop-yarn",
+              "type" : "directory"
+            },
+            {
+              "name" : "/var/log/hadoop-mapreduce",
+              "type" : "directory"
+            },
+            {
+              "name" : "/var/lib/hadoop-hdfs",
+              "type" : "directory"
+            },
+            {
+              "name" : "/var/lib/hadoop-yarn",
+              "type" : "directory"
+            },
+            {
+              "name" : "/var/lib/hadoop-mapreduce",
+              "type" : "directory"
+            },
+            {
+              "name" : "/tmp/hadoop-hdfs",
+              "type" : "directory"
+            },
+            {
+              "name" : "/hadoop/zookeeper",
+              "type" : "directory"
+            },
+            {
+              "name" : "/hadoop/hdfs",
+              "type" : "directory"
+            },
+            {
+              "name" : "/hadoop/yarn",
+              "type" : "directory"
+            }
+          ],
+          "alternatives" : [ ],
+          "existingUsers" : [
+            {
+              "userHomeDir" : "/home/zookeeper",
+              "userStatus" : "Available",
+              "userName" : "zookeeper"
+            },
+            {
+              "userHomeDir" : "/home/ambari-qa",
+              "userStatus" : "Available",
+              "userName" : "ambari-qa"
+            },
+            {
+              "userHomeDir" : "/home/hdfs",
+              "userStatus" : "Available",
+              "userName" : "hdfs"
+            },
+            {
+              "userHomeDir" : "/home/yarn",
+              "userStatus" : "Available",
+              "userName" : "yarn"
+            },
+            {
+              "userHomeDir" : "/home/mapred",
+              "userStatus" : "Available",
+              "userName" : "mapred"
+            }
+          ],
+          "existingRepos" : [ ],
+          "installedPackages" : [ ],
+          "hostHealth" : {
+            "activeJavaProcs" : [
+              {
+                "user" : "hdfs",
+                "pid" : 4485,
+                "command" : "/usr/jdk64/jdk1.8.0_60/bin/java -Dproc_datanode 
-Xmx1024m -Dhdp.version=2.3.4.0-3335 -Djava.net.preferIPv4Stack=true 
-Dhdp.version= -Djava.net.preferIPv4Stack=true -Dhdp.version= 
-Djava.net.preferIPv4Stack=true -Dhadoop.log.dir=/var/log/hadoop/hdfs 
-Dhadoop.log.file=hadoop.log -Dhadoop.home.dir=/usr/hdp/2.3.4.0-3335/hadoop 
-Dhadoop.id.str=hdfs -Dhadoop.root.logger=INFO,console 
-Djava.library.path=:/usr/hdp/2.3.4.0-3335/hadoop/lib/native/Linux-amd64-64:/usr/hdp/2.3.4.0-3335/hadoop/lib/native
 -Dhadoop.policy.file=hadoop-policy.xml -Djava.net.preferIPv4Stack=true 
-Dhdp.version=2.3.4.0-3335 -Dhadoop.log.dir=/var/log/hadoop/hdfs 
-Dhadoop.log.file=hadoop-hdfs-datanode-jerryshao-sts-test-1.log 
-Dhadoop.home.dir=/usr/hdp/2.3.4.0-3335/hadoop -Dhadoop.id.str=hdfs 
-Dhadoop.root.logger=INFO,RFA 
-Djava.library.path=:/usr/hdp/2.3.4.0-3335/hadoop/lib/native/Linux-amd64-64:/usr/hdp/2.3.4.0-3335/hadoop/lib/native:/usr/hdp/2.3.4.0-3335/hadoop/lib/native/Linux-amd64-64:/us
 r/hdp/2.3.4.0-3335/hadoop/lib/native -Dhadoop.policy.file=hadoop-policy.xml 
-Djava.net.preferIPv4Stack=true -server -server -XX:ParallelGCThreads=4 
-XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/hdfs/hs_err_pid%p.log 
-XX:NewSize=200m -XX:MaxNewSize=200m 
-Xloggc:/var/log/hadoop/hdfs/gc.log-201511260315 -verbose:gc 
-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms1024m 
-Xmx1024m -Dhadoop.security.logger=INFO,DRFAS 
-Dhdfs.audit.logger=INFO,DRFAAUDIT -server -XX:ParallelGCThreads=4 
-XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/hdfs/hs_err_pid%p.log 
-XX:NewSize=200m -XX:MaxNewSize=200m 
-Xloggc:/var/log/hadoop/hdfs/gc.log-201511260315 -verbose:gc 
-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms1024m 
-Xmx1024m -Dhadoop.security.logger=INFO,DRFAS 
-Dhdfs.audit.logger=INFO,DRFAAUDIT -server -XX:ParallelGCThreads=4 
-XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/hdfs/hs_err_pid%p.log 
-XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/v
 ar/log/hadoop/hdfs/gc.log-201511260315 -verbose:gc -XX:+PrintGCDetails 
-XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms1024m -Xmx1024m 
-Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT 
-Dhadoop.security.logger=INFO,RFAS 
org.apache.hadoop.hdfs.server.datanode.DataNode",
+                "hadoop" : true
+              },
+              {
+                "user" : "zookeeper",
+                "pid" : 4621,
+                "command" : "/usr/jdk64/jdk1.8.0_60/bin/java 
-Dzookeeper.log.dir=/var/log/zookeeper 
-Dzookeeper.log.file=zookeeper-zookeeper-server-jerryshao-sts-test-1.log 
-Dzookeeper.root.logger=INFO,CONSOLE -cp 
/usr/hdp/current/zookeeper-server/bin/../build/classes:/usr/hdp/current/zookeeper-server/bin/../build/lib/*.jar:/usr/hdp/current/zookeeper-server/bin/../lib/xercesMinimal-1.9.6.2.jar:/usr/hdp/current/zookeeper-server/bin/../lib/wagon-provider-api-2.4.jar:/usr/hdp/current/zookeeper-server/bin/../lib/wagon-http-shared4-2.4.jar:/usr/hdp/current/zookeeper-server/bin/../lib/wagon-http-shared-1.0-beta-6.jar:/usr/hdp/current/zookeeper-server/bin/../lib/wagon-http-lightweight-1.0-beta-6.jar:/usr/hdp/current/zookeeper-server/bin/../lib/wagon-http-2.4.jar:/usr/hdp/current/zookeeper-server/bin/../lib/wagon-file-1.0-beta-6.jar:/usr/hdp/current/zookeeper-server/bin/../lib/slf4j-log4j12-1.6.1.jar:/usr/hdp/current/zookeeper-server/bin/../lib/slf4j-api-1.6.1.jar:/usr/hdp/current/zookeeper
 
-server/bin/../lib/plexus-utils-3.0.8.jar:/usr/hdp/current/zookeeper-server/bin/../lib/plexus-interpolation-1.11.jar:/usr/hdp/current/zookeeper-server/bin/../lib/plexus-container-default-1.0-alpha-9-stable-1.jar:/usr/hdp/current/zookeeper-server/bin/../lib/netty-3.7.0.Final.jar:/usr/hdp/current/zookeeper-server/bin/../lib/nekohtml-1.9.6.2.jar:/usr/hdp/current/zookeeper-server/bin/../lib/maven-settings-2.2.1.jar:/usr/hdp/current/zookeeper-server/bin/../lib/maven-repository-metadata-2.2.1.jar:/usr/hdp/current/zookeeper-server/bin/../lib/maven-project-2.2.1.jar:/usr/hdp/current/zookeeper-server/bin/../lib/maven-profile-2.2.1.jar:/usr/hdp/current/zookeeper-server/bin/../lib/maven-plugin-registry-2.2.1.jar:/usr/hdp/current/zookeeper-server/bin/../lib/maven-model-2.2.1.jar:/usr/hdp/current/zookeeper-server/bin/../lib/maven-error-diagnostics-2.2.1.jar:/usr/hdp/current/zookeeper-server/bin/../lib/maven-artifact-manager-2.2.1.jar:/usr/hdp/current/zookeeper-server/bin/../lib/maven-artifact-2.
 
2.1.jar:/usr/hdp/current/zookeeper-server/bin/../lib/maven-ant-tasks-2.1.3.jar:/usr/hdp/current/zookeeper-server/bin/../lib/log4j-1.2.16.jar:/usr/hdp/current/zookeeper-server/bin/../lib/jsoup-1.7.1.jar:/usr/hdp/current/zookeeper-server/bin/../lib/jline-0.9.94.jar:/usr/hdp/current/zookeeper-server/bin/../lib/httpcore-4.2.3.jar:/usr/hdp/current/zookeeper-server/bin/../lib/httpclient-4.2.3.jar:/usr/hdp/current/zookeeper-server/bin/../lib/commons-logging-1.1.1.jar:/usr/hdp/current/zookeeper-server/bin/../lib/commons-io-2.2.jar:/usr/hdp/current/zookeeper-server/bin/../lib/commons-codec-1.6.jar:/usr/hdp/current/zookeeper-server/bin/../lib/classworlds-1.1-alpha-2.jar:/usr/hdp/current/zookeeper-server/bin/../lib/backport-util-concurrent-3.1.jar:/usr/hdp/current/zookeeper-server/bin/../lib/ant-launcher-1.8.0.jar:/usr/hdp/current/zookeeper-server/bin/../lib/ant-1.8.0.jar:/usr/hdp/current/zookeeper-server/bin/../zookeeper-3.4.6.2.3.4.0-3335.jar:/usr/hdp/current/zookeeper-server/bin/../src/java
 
/lib/*.jar:/usr/hdp/current/zookeeper-server/conf::/usr/share/zookeeper/*:/usr/share/zookeeper/*
 -Xmx1024m -Dcom.sun.management.jmxremote 
-Dcom.sun.management.jmxremote.local.only=false 
org.apache.zookeeper.server.quorum.QuorumPeerMain 
/usr/hdp/current/zookeeper-server/conf/zoo.cfg",
+                "hadoop" : true
+              },
+              {
+                "user" : "hdfs",
+                "pid" : 4798,
+                "command" : "/usr/jdk64/jdk1.8.0_60/bin/java -Dproc_namenode 
-Xmx1024m -Dhdp.version=2.3.4.0-3335 -Djava.net.preferIPv4Stack=true 
-Dhdp.version= -Djava.net.preferIPv4Stack=true -Dhdp.version= 
-Djava.net.preferIPv4Stack=true -Dhadoop.log.dir=/var/log/hadoop/hdfs 
-Dhadoop.log.file=hadoop.log -Dhadoop.home.dir=/usr/hdp/2.3.4.0-3335/hadoop 
-Dhadoop.id.str=hdfs -Dhadoop.root.logger=INFO,console 
-Djava.library.path=:/usr/hdp/2.3.4.0-3335/hadoop/lib/native/Linux-amd64-64:/usr/hdp/2.3.4.0-3335/hadoop/lib/native
 -Dhadoop.policy.file=hadoop-policy.xml -Djava.net.preferIPv4Stack=true 
-Dhdp.version=2.3.4.0-3335 -Dhadoop.log.dir=/var/log/hadoop/hdfs 
-Dhadoop.log.file=hadoop-hdfs-namenode-jerryshao-sts-test-1.log 
-Dhadoop.home.dir=/usr/hdp/2.3.4.0-3335/hadoop -Dhadoop.id.str=hdfs 
-Dhadoop.root.logger=INFO,RFA 
-Djava.library.path=:/usr/hdp/2.3.4.0-3335/hadoop/lib/native/Linux-amd64-64:/usr/hdp/2.3.4.0-3335/hadoop/lib/native:/usr/hdp/2.3.4.0-3335/hadoop/lib/native/Linux-amd64-64:/us
 r/hdp/2.3.4.0-3335/hadoop/lib/native -Dhadoop.policy.file=hadoop-policy.xml 
-Djava.net.preferIPv4Stack=true -server -XX:ParallelGCThreads=8 
-XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/hdfs/hs_err_pid%p.log 
-XX:NewSize=128m -XX:MaxNewSize=128m 
-Xloggc:/var/log/hadoop/hdfs/gc.log-201511260315 -verbose:gc 
-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms1024m 
-Xmx1024m -Dhadoop.security.logger=INFO,DRFAS 
-Dhdfs.audit.logger=INFO,DRFAAUDIT 
-XX:OnOutOfMemoryError=\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\"
 -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 -server 
-XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC 
-XX:ErrorFile=/var/log/hadoop/hdfs/hs_err_pid%p.log -XX:NewSize=128m 
-XX:MaxNewSize=128m -Xloggc:/var/log/hadoop/hdfs/gc.log-201511260315 
-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps 
-Xms1024m -Xmx1024m -Dhadoop.security.logger=INFO,DRFAS 
-Dhdfs.audit.logger=INFO,DRFAAUDIT -XX:OnOutOfMemoryError=\"/usr
 /hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\" 
-Dorg.mortbay.jetty.Request.maxFormContentSize=-1 -server 
-XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC 
-XX:ErrorFile=/var/log/hadoop/hdfs/hs_err_pid%p.log -XX:NewSize=128m 
-XX:MaxNewSize=128m -Xloggc:/var/log/hadoop/hdfs/gc.log-201511260315 
-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps 
-Xms1024m -Xmx1024m -Dhadoop.security.logger=INFO,DRFAS 
-Dhdfs.audit.logger=INFO,DRFAAUDIT 
-XX:OnOutOfMemoryError=\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\"
 -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 
-Dhadoop.security.logger=INFO,RFAS 
org.apache.hadoop.hdfs.server.namenode.NameNode",
+                "hadoop" : true
+              },
+              {
+                "user" : "yarn",
+                "pid" : 5976,
+                "command" : "/usr/jdk64/jdk1.8.0_60/bin/java 
-Dproc_timelineserver -Xmx1024m -Dhdp.version=2.3.4.0-3335 
-Dhadoop.log.dir=/var/log/hadoop-yarn/yarn 
-Dyarn.log.dir=/var/log/hadoop-yarn/yarn 
-Dhadoop.log.file=yarn-yarn-timelineserver-jerryshao-sts-test-1.log 
-Dyarn.log.file=yarn-yarn-timelineserver-jerryshao-sts-test-1.log 
-Dyarn.home.dir= -Dyarn.id.str=yarn -Dhadoop.root.logger=INFO,EWMA,RFA 
-Dyarn.root.logger=INFO,EWMA,RFA 
-Djava.library.path=:/usr/hdp/2.3.4.0-3335/hadoop/lib/native/Linux-amd64-64:/usr/hdp/2.3.4.0-3335/hadoop/lib/native:/var/lib/ambari-agent/tmp/hadoop_java_io_tmpdir:/usr/hdp/2.3.4.0-3335/hadoop/lib/native/Linux-amd64-64:/usr/hdp/2.3.4.0-3335/hadoop/lib/native:/var/lib/ambari-agent/tmp/hadoop_java_io_tmpdir
 -Dyarn.policy.file=hadoop-policy.xml 
-Djava.io.tmpdir=/var/lib/ambari-agent/tmp/hadoop_java_io_tmpdir 
-Dhadoop.log.dir=/var/log/hadoop-yarn/yarn 
-Dyarn.log.dir=/var/log/hadoop-yarn/yarn 
-Dhadoop.log.file=yarn-yarn-timelineserver-jerryshao-sts-test-
 1.log -Dyarn.log.file=yarn-yarn-timelineserver-jerryshao-sts-test-1.log 
-Dyarn.home.dir=/usr/hdp/current/hadoop-yarn-timelineserver 
-Dhadoop.home.dir=/usr/hdp/2.3.4.0-3335/hadoop 
-Dhadoop.root.logger=INFO,EWMA,RFA -Dyarn.root.logger=INFO,EWMA,RFA 
-Djava.library.path=:/usr/hdp/2.3.4.0-3335/hadoop/lib/native/Linux-amd64-64:/usr/hdp/2.3.4.0-3335/hadoop/lib/native:/var/lib/ambari-agent/tmp/hadoop_java_io_tmpdir:/usr/hdp/2.3.4.0-3335/hadoop/lib/native/Linux-amd64-64:/usr/hdp/2.3.4.0-3335/hadoop/lib/native:/var/lib/ambari-agent/tmp/hadoop_java_io_tmpdir
 -classpath 
/usr/hdp/current/hadoop-client/conf:/usr/hdp/current/hadoop-client/conf:/usr/hdp/current/hadoop-client/conf:/usr/hdp/2.3.4.0-3335/hadoop/lib/*:/usr/hdp/2.3.4.0-3335/hadoop/.//*:/usr/hdp/2.3.4.0-3335/hadoop-hdfs/./:/usr/hdp/2.3.4.0-3335/hadoop-hdfs/lib/*:/usr/hdp/2.3.4.0-3335/hadoop-hdfs/.//*:/usr/hdp/2.3.4.0-3335/hadoop-yarn/lib/*:/usr/hdp/2.3.4.0-3335/hadoop-yarn/.//*:/usr/hdp/2.3.4.0-3335/hadoop-mapreduce/lib/*:/usr/hdp/2.3.4.
 
0-3335/hadoop-mapreduce/.//*:/usr/hdp/current/hadoop-yarn-timelineserver/.//*:/usr/hdp/current/hadoop-yarn-timelineserver/lib/*:/usr/hdp/current/hadoop-client/conf/timelineserver-config/log4j.properties
 
org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryServer",
+                "hadoop" : true
+              },
+              {
+                "user" : "mapred",
+                "pid" : 6811,
+                "command" : "/usr/jdk64/jdk1.8.0_60/bin/java 
-Dproc_historyserver -Xmx900m -Dhdp.version=2.3.4.0-3335 
-Djava.net.preferIPv4Stack=true 
-Djava.io.tmpdir=/var/lib/ambari-agent/tmp/hadoop_java_io_tmpdir -Dhdp.version= 
-Dhdp.version= -Djava.net.preferIPv4Stack=true 
-Dhadoop.log.dir=/var/log/hadoop/mapred -Dhadoop.log.file=hadoop.log 
-Dhadoop.home.dir=/usr/hdp/2.3.4.0-3335/hadoop -Dhadoop.id.str=mapred 
-Dhadoop.root.logger=INFO,console 
-Djava.library.path=:/usr/hdp/2.3.4.0-3335/hadoop/lib/native/Linux-amd64-64:/usr/hdp/2.3.4.0-3335/hadoop/lib/native
 -Dhadoop.policy.file=hadoop-policy.xml -Djava.net.preferIPv4Stack=true 
-Dhadoop.log.dir=/var/log/hadoop-mapreduce/mapred -Dhadoop.log.file=hadoop.log 
-Dhadoop.root.logger=INFO,console -Dhadoop.id.str=mapred 
-Dhdp.version=2.3.4.0-3335 -Dhadoop.log.dir=/var/log/hadoop/mapred 
-Dhadoop.log.file=hadoop.log -Dhadoop.home.dir=/usr/hdp/2.3.4.0-3335/hadoop 
-Dhadoop.id.str=mapred -Dhadoop.root.logger=INFO,console 
-Djava.library.path=:/us
 
r/hdp/2.3.4.0-3335/hadoop/lib/native/Linux-amd64-64:/usr/hdp/2.3.4.0-3335/hadoop/lib/native:/var/lib/ambari-agent/tmp/hadoop_java_io_tmpdir:/usr/hdp/2.3.4.0-3335/hadoop/lib/native/Linux-amd64-64:/usr/hdp/2.3.4.0-3335/hadoop/lib/native
 -Dhadoop.policy.file=hadoop-policy.xml -Djava.net.preferIPv4Stack=true 
-Dhadoop.log.dir=/var/log/hadoop-mapreduce/mapred 
-Dhadoop.log.file=mapred-mapred-historyserver-jerryshao-sts-test-1.log 
-Dhadoop.root.logger=INFO,RFA -Dmapred.jobsummary.logger=INFO,JSA 
-Dhadoop.security.logger=INFO,NullAppender 
org.apache.hadoop.mapreduce.v2.hs.JobHistoryServer",
+                "hadoop" : true
+              },
+              {
+                "user" : "yarn",
+                "pid" : 7032,
+                "command" : "/usr/jdk64/jdk1.8.0_60/bin/java 
-Dproc_resourcemanager -Xmx1024m -Dhdp.version=2.3.4.0-3335 
-Dhadoop.log.dir=/var/log/hadoop-yarn/yarn 
-Dyarn.log.dir=/var/log/hadoop-yarn/yarn 
-Dhadoop.log.file=yarn-yarn-resourcemanager-jerryshao-sts-test-1.log 
-Dyarn.log.file=yarn-yarn-resourcemanager-jerryshao-sts-test-1.log 
-Dyarn.home.dir= -Dyarn.id.str=yarn -Dhadoop.root.logger=INFO,EWMA,RFA 
-Dyarn.root.logger=INFO,EWMA,RFA 
-Djava.library.path=:/usr/hdp/2.3.4.0-3335/hadoop/lib/native/Linux-amd64-64:/usr/hdp/2.3.4.0-3335/hadoop/lib/native:/var/lib/ambari-agent/tmp/hadoop_java_io_tmpdir:/usr/hdp/2.3.4.0-3335/hadoop/lib/native/Linux-amd64-64:/usr/hdp/2.3.4.0-3335/hadoop/lib/native:/var/lib/ambari-agent/tmp/hadoop_java_io_tmpdir
 -Dyarn.policy.file=hadoop-policy.xml 
-Djava.io.tmpdir=/var/lib/ambari-agent/tmp/hadoop_java_io_tmpdir 
-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY 
-Dhadoop.log.dir=/var/log/hadoop-yarn/yarn -Dyarn.log.dir=/var/log/hadoop-yarn/y
 arn -Dhadoop.log.file=yarn-yarn-resourcemanager-jerryshao-sts-test-1.log 
-Dyarn.log.file=yarn-yarn-resourcemanager-jerryshao-sts-test-1.log 
-Dyarn.home.dir=/usr/hdp/current/hadoop-yarn-resourcemanager 
-Dhadoop.home.dir=/usr/hdp/2.3.4.0-3335/hadoop 
-Dhadoop.root.logger=INFO,EWMA,RFA -Dyarn.root.logger=INFO,EWMA,RFA 
-Djava.library.path=:/usr/hdp/2.3.4.0-3335/hadoop/lib/native/Linux-amd64-64:/usr/hdp/2.3.4.0-3335/hadoop/lib/native:/var/lib/ambari-agent/tmp/hadoop_java_io_tmpdir:/usr/hdp/2.3.4.0-3335/hadoop/lib/native/Linux-amd64-64:/usr/hdp/2.3.4.0-3335/hadoop/lib/native:/var/lib/ambari-agent/tmp/hadoop_java_io_tmpdir
 -classpath 
/usr/hdp/current/hadoop-client/conf:/usr/hdp/current/hadoop-client/conf:/usr/hdp/current/hadoop-client/conf:/usr/hdp/2.3.4.0-3335/hadoop/lib/*:/usr/hdp/2.3.4.0-3335/hadoop/.//*:/usr/hdp/2.3.4.0-3335/hadoop-hdfs/./:/usr/hdp/2.3.4.0-3335/hadoop-hdfs/lib/*:/usr/hdp/2.3.4.0-3335/hadoop-hdfs/.//*:/usr/hdp/2.3.4.0-3335/hadoop-yarn/lib/*:/usr/hdp/2.3.4.0-3335/hadoop-y
 
arn/.//*:/usr/hdp/2.3.4.0-3335/hadoop-mapreduce/lib/*:/usr/hdp/2.3.4.0-3335/hadoop-mapreduce/.//*:/usr/hdp/current/hadoop-yarn-resourcemanager/.//*:/usr/hdp/current/hadoop-yarn-resourcemanager/lib/*:/usr/hdp/current/hadoop-client/conf/rm-config/log4j.properties
 org.apache.hadoop.yarn.server.resourcemanager.ResourceManager",
+                "hadoop" : true
+              },
+              {
+                "user" : "hdfs",
+                "pid" : 7178,
+                "command" : "/usr/jdk64/jdk1.8.0_60/bin/java 
-Dproc_secondarynamenode -Xmx1024m -Dhdp.version=2.3.4.0-3335 
-Djava.net.preferIPv4Stack=true -Dhdp.version= -Djava.net.preferIPv4Stack=true 
-Dhdp.version= -Djava.net.preferIPv4Stack=true 
-Dhadoop.log.dir=/var/log/hadoop/hdfs -Dhadoop.log.file=hadoop.log 
-Dhadoop.home.dir=/usr/hdp/2.3.4.0-3335/hadoop -Dhadoop.id.str=hdfs 
-Dhadoop.root.logger=INFO,console 
-Djava.library.path=:/usr/hdp/2.3.4.0-3335/hadoop/lib/native/Linux-amd64-64:/usr/hdp/2.3.4.0-3335/hadoop/lib/native
 -Dhadoop.policy.file=hadoop-policy.xml -Djava.net.preferIPv4Stack=true 
-Dhdp.version=2.3.4.0-3335 -Dhadoop.log.dir=/var/log/hadoop/hdfs 
-Dhadoop.log.file=hadoop-hdfs-secondarynamenode-jerryshao-sts-test-1.log 
-Dhadoop.home.dir=/usr/hdp/2.3.4.0-3335/hadoop -Dhadoop.id.str=hdfs 
-Dhadoop.root.logger=INFO,RFA 
-Djava.library.path=:/usr/hdp/2.3.4.0-3335/hadoop/lib/native/Linux-amd64-64:/usr/hdp/2.3.4.0-3335/hadoop/lib/native:/usr/hdp/2.3.4.0-3335/hadoop/lib/native/
 Linux-amd64-64:/usr/hdp/2.3.4.0-3335/hadoop/lib/native 
-Dhadoop.policy.file=hadoop-policy.xml -Djava.net.preferIPv4Stack=true -server 
-XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC 
-XX:ErrorFile=/var/log/hadoop/hdfs/hs_err_pid%p.log -XX:NewSize=128m 
-XX:MaxNewSize=128m -Xloggc:/var/log/hadoop/hdfs/gc.log-201511260316 
-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps 
-Xms1024m -Xmx1024m -Dhadoop.security.logger=INFO,DRFAS 
-Dhdfs.audit.logger=INFO,DRFAAUDIT 
-XX:OnOutOfMemoryError=\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\"
 -server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC 
-XX:ErrorFile=/var/log/hadoop/hdfs/hs_err_pid%p.log -XX:NewSize=128m 
-XX:MaxNewSize=128m -Xloggc:/var/log/hadoop/hdfs/gc.log-201511260316 
-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps 
-Xms1024m -Xmx1024m -Dhadoop.security.logger=INFO,DRFAS 
-Dhdfs.audit.logger=INFO,DRFAAUDIT -XX:OnOutOfMemoryError=\"/usr/hdp/current/
 hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\" -server 
-XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC 
-XX:ErrorFile=/var/log/hadoop/hdfs/hs_err_pid%p.log -XX:NewSize=128m 
-XX:MaxNewSize=128m -Xloggc:/var/log/hadoop/hdfs/gc.log-201511260316 
-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps 
-Xms1024m -Xmx1024m -Dhadoop.security.logger=INFO,DRFAS 
-Dhdfs.audit.logger=INFO,DRFAAUDIT 
-XX:OnOutOfMemoryError=\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\"
 -Dhadoop.security.logger=INFO,RFAS 
org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode",
+                "hadoop" : true
+              },
+              {
+                "user" : "yarn",
+                "pid" : 7792,
+                "command" : "/usr/jdk64/jdk1.8.0_60/bin/java 
-Dproc_nodemanager -Xmx1024m -Dhdp.version=2.3.4.0-3335 
-Dhadoop.log.dir=/var/log/hadoop-yarn/yarn 
-Dyarn.log.dir=/var/log/hadoop-yarn/yarn 
-Dhadoop.log.file=yarn-yarn-nodemanager-jerryshao-sts-test-1.log 
-Dyarn.log.file=yarn-yarn-nodemanager-jerryshao-sts-test-1.log -Dyarn.home.dir= 
-Dyarn.id.str=yarn -Dhadoop.root.logger=INFO,EWMA,RFA 
-Dyarn.root.logger=INFO,EWMA,RFA 
-Djava.library.path=:/usr/hdp/2.3.4.0-3335/hadoop/lib/native/Linux-amd64-64:/usr/hdp/2.3.4.0-3335/hadoop/lib/native:/var/lib/ambari-agent/tmp/hadoop_java_io_tmpdir:/usr/hdp/2.3.4.0-3335/hadoop/lib/native/Linux-amd64-64:/usr/hdp/2.3.4.0-3335/hadoop/lib/native:/var/lib/ambari-agent/tmp/hadoop_java_io_tmpdir
 -Dyarn.policy.file=hadoop-policy.xml 
-Djava.io.tmpdir=/var/lib/ambari-agent/tmp/hadoop_java_io_tmpdir -server 
-Dhadoop.log.dir=/var/log/hadoop-yarn/yarn 
-Dyarn.log.dir=/var/log/hadoop-yarn/yarn 
-Dhadoop.log.file=yarn-yarn-nodemanager-jerryshao-sts-test-1.lo
 g -Dyarn.log.file=yarn-yarn-nodemanager-jerryshao-sts-test-1.log 
-Dyarn.home.dir=/usr/hdp/current/hadoop-yarn-nodemanager 
-Dhadoop.home.dir=/usr/hdp/2.3.4.0-3335/hadoop 
-Dhadoop.root.logger=INFO,EWMA,RFA -Dyarn.root.logger=INFO,EWMA,RFA 
-Djava.library.path=:/usr/hdp/2.3.4.0-3335/hadoop/lib/native/Linux-amd64-64:/usr/hdp/2.3.4.0-3335/hadoop/lib/native:/var/lib/ambari-agent/tmp/hadoop_java_io_tmpdir:/usr/hdp/2.3.4.0-3335/hadoop/lib/native/Linux-amd64-64:/usr/hdp/2.3.4.0-3335/hadoop/lib/native:/var/lib/ambari-agent/tmp/hadoop_java_io_tmpdir
 -classpath 
/usr/hdp/current/hadoop-client/conf:/usr/hdp/current/hadoop-client/conf:/usr/hdp/current/hadoop-client/conf:/usr/hdp/2.3.4.0-3335/hadoop/lib/*:/usr/hdp/2.3.4.0-3335/hadoop/.//*:/usr/hdp/2.3.4.0-3335/hadoop-hdfs/./:/usr/hdp/2.3.4.0-3335/hadoop-hdfs/lib/*:/usr/hdp/2.3.4.0-3335/hadoop-hdfs/.//*:/usr/hdp/2.3.4.0-3335/hadoop-yarn/lib/*:/usr/hdp/2.3.4.0-3335/hadoop-yarn/.//*:/usr/hdp/2.3.4.0-3335/hadoop-mapreduce/lib/*:/usr/hdp/2.3.4.0-3335/had
 
oop-mapreduce/.//*:/usr/hdp/current/hadoop-yarn-nodemanager/.//*:/usr/hdp/current/hadoop-yarn-nodemanager/lib/*:/usr/hdp/current/hadoop-client/conf/nm-config/log4j.properties
 org.apache.hadoop.yarn.server.nodemanager.NodeManager",
+                "hadoop" : true
+              }
+            ],
+            "agentTimeStampAtReporting" : 1448509855951,
+            "serverTimeStampAtReporting" : 1448509856029,
+            "liveServices" : [
+              {
+                "desc" : "",
+                "name" : "ntpd",
+                "status" : "Healthy"
+              }
+            ]
+          },
+          "umask" : 18,
+          "transparentHugePage" : "always",
+          "firewallRunning" : true,
+          "firewallName" : "iptables",
+          "reverseLookup" : true
+        },
+        "last_heartbeat_time" : 1448509896318,
+        "last_registration_time" : 1448509844933,
+        "os_arch" : "x86_64",
+        "os_family" : "redhat6",
+        "os_type" : "centos6",
+        "ph_cpu_count" : 2,
+        "public_host_name" : "jerryshao-sts-test-1.c.pramod-thangali.internal",
+        "rack_info" : "/default-rack",
+        "recovery_report" : {
+          "summary" : "RECOVERABLE",
+          "component_reports" : [ ]
+        },
+        "recovery_summary" : "RECOVERABLE",
+        "total_mem" : 7543444
+      }
+    }
+  ]
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/0585b5af/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py
index 99cfa6c..f018819 100644
--- a/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py
@@ -56,7 +56,7 @@ class TestHDP23StackAdvisor(TestCase):
     file = os.path.join(self.testDirectory, filename)
     with open(file, 'rb') as f:
       data = json.load(f)
-    return data    
+    return data
 
   @patch('__builtin__.open')
   @patch('os.path.exists')
@@ -81,13 +81,13 @@ class TestHDP23StackAdvisor(TestCase):
 
   def test_createComponentLayoutRecommendations_hawq_1_Host(self):
     """ Test that HAWQSTANDBY is not recommended on a single node cluster """
-    
+
     services = self.load_json("services-hawq-1-host.json")
     componentsListList = [service["components"] for service in services["services"]]
     componentsList = [item for sublist in componentsListList for item in sublist]
     componentNames = [component["StackServiceComponents"]["component_name"] for component in componentsList]
     self.assertTrue('HAWQSTANDBY' in componentNames)
-    
+
     hosts = self.load_json("hosts-1-host.json")
     hostsList = [host["Hosts"]["host_name"] for host in hosts["items"]]
     self.assertEquals(len(hostsList), 1)
@@ -96,9 +96,9 @@ class TestHDP23StackAdvisor(TestCase):
 
     recommendedComponentsListList = [hostgroup["components"] for hostgroup in recommendations["blueprint"]["host_groups"]]
     recommendedComponents = [item["name"] for sublist in recommendedComponentsListList for item in sublist]
-    self.assertTrue('HAWQMASTER' in recommendedComponents) 
-    self.assertFalse('HAWQSTANDBY' in recommendedComponents) 
-    self.assertTrue('HAWQSEGMENT' in recommendedComponents) 
+    self.assertTrue('HAWQMASTER' in recommendedComponents)
+    self.assertFalse('HAWQSTANDBY' in recommendedComponents)
+    self.assertTrue('HAWQSEGMENT' in recommendedComponents)
 
 
   def test_createComponentLayoutRecommendations_hawq_3_Hosts(self):
@@ -109,19 +109,19 @@ class TestHDP23StackAdvisor(TestCase):
     componentsList = [item for sublist in componentsListList for item in sublist]
     componentNames = [component["StackServiceComponents"]["component_name"] for component in componentsList]
     self.assertTrue('HAWQSTANDBY' in componentNames)
-    
+
     hosts = self.load_json("hosts-3-hosts.json")
     hostsList = [host["Hosts"]["host_name"] for host in hosts["items"]]
     self.assertEquals(len(hostsList), 3)
 
     recommendations = self.stackAdvisor.createComponentLayoutRecommendations(services, hosts)
-    
+
     recommendedComponentsListList = [hostgroup["components"] for hostgroup in recommendations["blueprint"]["host_groups"]]
     recommendedComponents = [item["name"] for sublist in recommendedComponentsListList for item in sublist]
-    self.assertTrue('HAWQMASTER' in recommendedComponents) 
-    self.assertTrue('HAWQSTANDBY' in recommendedComponents) 
+    self.assertTrue('HAWQMASTER' in recommendedComponents)
+    self.assertTrue('HAWQSTANDBY' in recommendedComponents)
     self.assertTrue('HAWQSEGMENT' in recommendedComponents)
-    
+
     # make sure master components are not collocated
     for sublist in recommendedComponentsListList:
       hostComponents = [item["name"] for item in sublist]
@@ -135,28 +135,28 @@ class TestHDP23StackAdvisor(TestCase):
     componentsListList = [service["components"] for service in services["services"]]
     componentsList = [item for sublist in componentsListList for item in sublist]
     componentNames = [component["StackServiceComponents"]["component_name"] for component in componentsList]
-    self.assertFalse('HAWQMASTER' in componentNames) 
-    self.assertFalse('HAWQSTANDBY' in componentNames) 
+    self.assertFalse('HAWQMASTER' in componentNames)
+    self.assertFalse('HAWQSTANDBY' in componentNames)
     self.assertFalse('HAWQSEGMENT' in componentNames)
-    
+
     hosts = self.load_json("hosts-3-hosts.json")
     hostsList = [host["Hosts"]["host_name"] for host in hosts["items"]]
     self.assertEquals(len(hostsList), 3)
 
     recommendations = self.stackAdvisor.createComponentLayoutRecommendations(services, hosts)
-    
+
     recommendedComponentsListList = [hostgroup["components"] for hostgroup in recommendations["blueprint"]["host_groups"]]
     recommendedComponents = [item["name"] for sublist in recommendedComponentsListList for item in sublist]
-    self.assertFalse('HAWQMASTER' in recommendedComponents) 
-    self.assertFalse('HAWQSTANDBY' in recommendedComponents) 
+    self.assertFalse('HAWQMASTER' in recommendedComponents)
+    self.assertFalse('HAWQSTANDBY' in recommendedComponents)
     self.assertFalse('HAWQSEGMENT' in recommendedComponents)
-      
+
 
   def fqdn_mock_result(value=None):
       return 'c6401.ambari.apache.org' if value is None else value
 
-      
-  @patch('socket.getfqdn', side_effect=fqdn_mock_result)  
+
+  @patch('socket.getfqdn', side_effect=fqdn_mock_result)
   def test_getComponentLayoutValidations_hawq_3_Hosts(self, socket_mock):
     """ Test layout validations for HAWQ components on a 3-node cluster """
 
@@ -169,14 +169,14 @@ class TestHDP23StackAdvisor(TestCase):
     self.assertEquals(len(hawqMasterHosts[0]), 1)
     self.assertEquals(len(hawqStandbyHosts[0]), 1)
     self.assertNotEquals(hawqMasterHosts[0][0], hawqStandbyHosts[0][0])
-    
+
     hosts = self.load_json("hosts-3-hosts.json")
     hostsList = [host["Hosts"]["host_name"] for host in hosts["items"]]
     self.assertEquals(len(hostsList), 3)
 
     validations = self.stackAdvisor.getComponentLayoutValidations(services, hosts)
     self.assertEquals(len(validations), 0)
-    
+
     # case-2: HAWQ masters are collocated
     services = self.load_json("services-master_standby_colo-3-hosts.json")
     componentsListList = [service["components"] for service in services["services"]]
@@ -186,12 +186,12 @@ class TestHDP23StackAdvisor(TestCase):
     self.assertEquals(len(hawqMasterHosts[0]), 1)
     self.assertEquals(len(hawqStandbyHosts[0]), 1)
     self.assertEquals(hawqMasterHosts[0][0], hawqStandbyHosts[0][0])
-    
+
     validations = self.stackAdvisor.getComponentLayoutValidations(services, hosts)
     self.assertEquals(len(validations), 1)
     expected={'component-name': 'HAWQSTANDBY', 'message': 'HAWQ Standby Master and HAWQ Master should not be deployed on the same host.', 'type': 'host-component', 'host': 'c6403.ambari.apache.org', 'level': 'ERROR'}
     self.assertEquals(validations[0], expected)
-    
+
     # case-3: HAWQ Master and Ambari Server are collocated
     services = self.load_json("services-master_ambari_colo-3-hosts.json")
     componentsListList = [service["components"] for service in services["services"]]
@@ -202,7 +202,7 @@ class TestHDP23StackAdvisor(TestCase):
     self.assertEquals(len(hawqStandbyHosts[0]), 1)
     self.assertNotEquals(hawqMasterHosts[0][0], hawqStandbyHosts[0][0])
     self.assertEquals(hawqMasterHosts[0][0], "c6401.ambari.apache.org")
-    
+
     validations = self.stackAdvisor.getComponentLayoutValidations(services, hosts)
     self.assertEquals(len(validations), 1)
     expected={'component-name': 'HAWQMASTER', 'message': 'HAWQ Master and Ambari Server should not be deployed on the same host. If you leave them collocated, make sure to set HAWQ Master Port property to a value different from the port number used by Ambari Server database.', 'type': 'host-component', 'host': 'c6401.ambari.apache.org', 'level': 'WARN'}
@@ -218,14 +218,14 @@ class TestHDP23StackAdvisor(TestCase):
     self.assertEquals(len(hawqStandbyHosts[0]), 1)
     self.assertNotEquals(hawqMasterHosts[0][0], hawqStandbyHosts[0][0])
     self.assertEquals(hawqStandbyHosts[0][0], "c6401.ambari.apache.org")
-    
+
     validations = self.stackAdvisor.getComponentLayoutValidations(services, hosts)
     self.assertEquals(len(validations), 1)
     expected={'component-name': 'HAWQSTANDBY', 'message': 'HAWQ Standby Master and Ambari Server should not be deployed on the same host. If you leave them collocated, make sure to set HAWQ Master Port property to a value different from the port number used by Ambari Server database.', 'type': 'host-component', 'host': 'c6401.ambari.apache.org', 'level': 'WARN'}
     self.assertEquals(validations[0], expected)
 
 
-  @patch('socket.getfqdn', side_effect=fqdn_mock_result)  
+  @patch('socket.getfqdn', side_effect=fqdn_mock_result)
   def test_getComponentLayoutValidations_nohawq_3_Hosts(self, socket_mock):
     """ Test no failures when there are no HAWQ components on a 3-node cluster 
"""
 
@@ -237,7 +237,7 @@ class TestHDP23StackAdvisor(TestCase):
     hawqStandbyHosts = [component["StackServiceComponents"]["hostnames"] for component in componentsList if component["StackServiceComponents"]["component_name"] == "HAWQSTANDBY"]
     self.assertEquals(len(hawqMasterHosts), 0)
     self.assertEquals(len(hawqStandbyHosts), 0)
-    
+
     hosts = self.load_json("hosts-3-hosts.json")
     hostsList = [host["Hosts"]["host_name"] for host in hosts["items"]]
     self.assertEquals(len(hostsList), 3)
@@ -245,7 +245,44 @@ class TestHDP23StackAdvisor(TestCase):
     validations = self.stackAdvisor.getComponentLayoutValidations(services, hosts)
     self.assertEquals(len(validations), 0)
 
-                           
+
+  @patch('socket.getfqdn', side_effect=fqdn_mock_result)
+  def test_getComponentLayoutValidations_sparkts_no_hive(self, socket_mock):
+    """ Test SparkTS is picked when Hive is not installed """
+
+    hosts = self.load_json("sparkts-host.json")
+    services = self.load_json("services-sparkts.json")
+    componentsListList = [service["components"] for service in services["services"]]
+    componentsList = [item for sublist in componentsListList for item in sublist]
+
+    sparkTS = [component["StackServiceComponents"]["hostnames"] for component in componentsList if component["StackServiceComponents"]["component_name"] == "SPARK_THRIFTSERVER"]
+    hiveMetaStore = [component["StackServiceComponents"]["hostnames"] for component in componentsList if component["StackServiceComponents"]["component_name"] == "HIVE_METASTORE"]
+    self.assertEquals(len(sparkTS), 1)
+    self.assertEquals(len(hiveMetaStore), 0)
+
+    validations = self.stackAdvisor.getComponentLayoutValidations(services, hosts)
+    expected = {'component-name': 'SPARK_THRIFTSERVER', 'message': 'SPARK_THRIFTSERVER requires HIVE_METASTORE to be selected/deployed.', 'type': 'host-component', 'level': 'ERROR'}
+    self.assertEquals(validations[0], expected)
+
+
+  @patch('socket.getfqdn', side_effect=fqdn_mock_result)
+  def test_getComponentLayoutValidations_sparkts_with_hive(self, socket_mock):
+    """ Test SparkTS is picked when Hive is installed """
+
+    hosts = self.load_json("sparkts-host.json")
+    services = self.load_json("services-sparkts-hive.json")
+    componentsListList = [service["components"] for service in services["services"]]
+    componentsList = [item for sublist in componentsListList for item in sublist]
+
+    sparkTS = [component["StackServiceComponents"]["hostnames"] for component in componentsList if component["StackServiceComponents"]["component_name"] == "SPARK_THRIFTSERVER"]
+    hiveMetaStore = [component["StackServiceComponents"]["hostnames"] for component in componentsList if component["StackServiceComponents"]["component_name"] == "HIVE_METASTORE"]
+    self.assertEquals(len(sparkTS), 1)
+    self.assertEquals(len(hiveMetaStore), 1)
+
+    validations = self.stackAdvisor.getComponentLayoutValidations(services, hosts)
+    self.assertEquals(len(validations), 0)
+
+
   def test_recommendHDFSConfigurations(self):
     configurations = {}
     clusterData = {
@@ -726,10 +763,10 @@ class TestHDP23StackAdvisor(TestCase):
         },
        'property_attributes': {
         'hive.auto.convert.join.noconditionaltask.size': {'maximum': '805306368'},
-         'hive.server2.authentication.pam.services': {'delete': 'true'}, 
-         'hive.server2.custom.authentication.class': {'delete': 'true'}, 
+         'hive.server2.authentication.pam.services': {'delete': 'true'},
+         'hive.server2.custom.authentication.class': {'delete': 'true'},
          'hive.server2.authentication.kerberos.principal': {'delete': 'true'},
-         'hive.server2.authentication.kerberos.keytab': {'delete': 'true'}, 
+         'hive.server2.authentication.kerberos.keytab': {'delete': 'true'},
          'hive.server2.authentication.ldap.url': {'delete': 'true'},
          'hive.server2.tez.default.queues': {
           'entries': [{'value': 'queue1', 'label': 'queue1 queue'}, {'value': 'queue2', 'label': 'queue2 queue'}]
