Repository: ambari
Updated Branches:
  refs/heads/branch-2.4 d81fce8df -> 8eddba90e


AMBARI-17098 - Atlas Integration : Ambari overwrites 
users-credentials.properties and policy-store.txt


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/8eddba90
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/8eddba90
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/8eddba90

Branch: refs/heads/branch-2.4
Commit: 8eddba90e87ae480af18a073177daabd77cd5bcd
Parents: d81fce8
Author: tbeerbower <tbeerbo...@hortonworks.com>
Authored: Tue Jun 7 15:53:13 2016 -0400
Committer: tbeerbower <tbeerbo...@hortonworks.com>
Committed: Tue Jun 7 15:54:16 2016 -0400

----------------------------------------------------------------------
 .../BlueprintConfigurationProcessor.java        |  61 +++++++
 .../ATLAS/0.1.0.2.3/package/scripts/params.py   |  37 -----
 .../configuration/application-properties.xml    |  14 +-
 .../stacks/HDP/2.5/services/stack_advisor.py    |  91 ++++++++++-
 .../BlueprintConfigurationProcessorTest.java    | 163 +++++++++++++++++++
 .../stacks/2.5/common/test_stack_advisor.py     | 133 +++++++++++++--
 6 files changed, 435 insertions(+), 64 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/8eddba90/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
 
b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
index 43ac1e9..de70a2c 100644
--- 
a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
+++ 
b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
@@ -2097,6 +2097,59 @@ public class BlueprintConfigurationProcessor {
     }
   }
 
+  /**
+   * Custom PropertyUpdater that handles the updating of the Atlas HA related 
properties.
+   */
+  private static class AtlasHAPropertyUpdater extends 
MultipleHostTopologyUpdater {
+
+    public AtlasHAPropertyUpdater() {
+      super("ATLAS_SERVER");
+    }
+
+    @Override
+    public String updateForClusterCreate(String propertyName, String 
origValue, Map<String,
+        Map<String, String>> properties, ClusterTopology topology) {
+
+      int serverId = 1;
+
+      StringBuilder sb = new StringBuilder();
+
+      Collection<String> hosts = 
topology.getHostAssignmentsForComponent("ATLAS_SERVER");
+
+      switch (propertyName) {
+        case "atlas.server.address.id1":
+
+          Map<String, String> applicationProperties = 
properties.get("application-properties");
+
+          Boolean ssl_enabled = 
Boolean.parseBoolean(applicationProperties.get("atlas.enableTLS"));
+
+          String port = ssl_enabled ? 
applicationProperties.get("atlas.server.https.port") :
+              applicationProperties.get("atlas.server.http.port");
+
+          for (String host : hosts) {
+
+            if (serverId > 1) {
+              
sb.append("\n").append("atlas.server.address.id").append(serverId).append("=");
+            }
+            sb.append(host).append(":").append(port);
+            ++serverId;
+          }
+          break;
+        case "atlas.server.ids":
+
+          while (serverId <= hosts.size()) {
+            if (serverId > 1) {
+              sb.append(",");
+            }
+            sb.append("id" + serverId++);
+          }
+          break;
+        default:
+          return origValue;
+      }
+      return sb.toString();
+    }
+  }
 
   /**
    * Custom PropertyUpdater that handles the parsing and updating of the
@@ -2576,6 +2629,14 @@ public class BlueprintConfigurationProcessor {
 
     // ATLAS
     atlasPropsMap.put("atlas.server.bind.address", new 
SingleHostTopologyUpdater("ATLAS_SERVER"));
+    PropertyUpdater atlasHAUpdater = new AtlasHAPropertyUpdater();
+    atlasPropsMap.put("atlas.server.ids", atlasHAUpdater);
+    atlasPropsMap.put("atlas.server.address.id1", atlasHAUpdater);
+    atlasPropsMap.put("atlas.kafka.bootstrap.servers", new 
MultipleHostTopologyUpdater("KAFKA_BROKER"));
+    atlasPropsMap.put("atlas.kafka.zookeeper.connect", new 
MultipleHostTopologyUpdater("ZOOKEEPER_SERVER"));
+    atlasPropsMap.put("atlas.graph.index.search.solr.zookeeper-url", new 
MultipleHostTopologyUpdater("ZOOKEEPER_SERVER", ',', false, true, true));
+    atlasPropsMap.put("atlas.graph.storage.hostname", new 
MultipleHostTopologyUpdater("HBASE_MASTER"));
+    atlasPropsMap.put("atlas.audit.hbase.zookeeper.quorum", new 
MultipleHostTopologyUpdater("ZOOKEEPER_SERVER"));
 
     // RANGER_ADMIN
     rangerAdminPropsMap.put("policymgr_external_url", new 
SingleHostTopologyUpdater("RANGER_ADMIN"));

http://git-wip-us.apache.org/repos/asf/ambari/blob/8eddba90/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/params.py
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/params.py
 
b/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/params.py
index a79a456..e227915 100644
--- 
a/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/params.py
+++ 
b/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/params.py
@@ -127,40 +127,9 @@ if security_enabled:
 else:
     smoke_cmd = format('curl -s -o /dev/null -w "%{{http_code}}" 
{metadata_protocol}://{metadata_host}:{metadata_port}/')
 
-# kafka
-kafka_bootstrap_servers = ""
-kafka_broker_hosts = default('/clusterHostInfo/kafka_broker_hosts', [])
-
-if not len(kafka_broker_hosts) == 0:
-  kafka_broker_port = default("/configurations/kafka-broker/port", 6667)
-  kafka_bootstrap_servers = kafka_broker_hosts[0] + ":" + 
str(kafka_broker_port)
-
-kafka_zookeeper_connect = 
default("/configurations/kafka-broker/zookeeper.connect", None)
-
 # hbase
-hbase_zookeeper_quorum = 
default('/configurations/hbase-site/hbase.zookeeper.quorum', None)
 hbase_conf_dir = "/etc/hbase/conf"
 
-# atlas HA
-atlas_hosts = sorted(default('/clusterHostInfo/atlas_server_hosts', []))
-
-id = 1
-server_ids = ""
-server_hosts = ""
-first_id = True
-for host in atlas_hosts:
-  server_id = "id" + str(id)
-  server_host = host + ":" + metadata_port
-  if first_id:
-    server_ids = server_id
-    server_hosts = server_host
-  else:
-    server_ids += "," + server_id
-    server_hosts += "\n" + "atlas.server.address." + server_id + "=" + 
server_host
-
-  id += 1
-  first_id = False
-
 atlas_search_backend = 
default("/configurations/application-properties/atlas.graph.index.search.backend",
 "")
 search_backend_solr = atlas_search_backend.startswith('solr')
 
@@ -183,18 +152,12 @@ zookeeper_port = 
default('/configurations/zoo.cfg/clientPort', None)
 # get comma separated lists of zookeeper hosts from clusterHostInfo
 index = 0
 zookeeper_quorum = ""
-solr_zookeeper_url = ""
-
 for host in zookeeper_hosts:
   zookeeper_host = host
   if zookeeper_port is not None:
     zookeeper_host = host + ":" + str(zookeeper_port)
 
-  if logsearch_solr_znode is not None:
-    solr_zookeeper_url += zookeeper_host + logsearch_solr_znode
-
   zookeeper_quorum += zookeeper_host
   index += 1
   if index < len(zookeeper_hosts):
     zookeeper_quorum += ","
-    solr_zookeeper_url += ","

http://git-wip-us.apache.org/repos/asf/ambari/blob/8eddba90/ambari-server/src/main/resources/stacks/HDP/2.5/services/ATLAS/configuration/application-properties.xml
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/main/resources/stacks/HDP/2.5/services/ATLAS/configuration/application-properties.xml
 
b/ambari-server/src/main/resources/stacks/HDP/2.5/services/ATLAS/configuration/application-properties.xml
index 98cbc30..80bfd6f 100644
--- 
a/ambari-server/src/main/resources/stacks/HDP/2.5/services/ATLAS/configuration/application-properties.xml
+++ 
b/ambari-server/src/main/resources/stacks/HDP/2.5/services/ATLAS/configuration/application-properties.xml
@@ -29,7 +29,7 @@
   </property>
   <property>
     <name>atlas.server.ids</name>
-    <value>{{server_ids}}</value>
+    <value></value>
     <description>List of Atlas server ids for HA feature.</description>
     <value-attributes>
       <overridable>false</overridable>
@@ -40,7 +40,7 @@
   </property>
   <property>
     <name>atlas.server.address.id1</name>
-    <value>{{server_hosts}}</value>
+    <value></value>
     <description>Mapping of Atlas server ids to hosts.</description>
     <value-attributes>
       <overridable>false</overridable>
@@ -58,14 +58,14 @@
   </property>
   <property>
     <name>atlas.graph.storage.hostname</name>
-    <value>{{hbase_zookeeper_quorum}}</value>
+    <value></value>
     <description/>
     <on-ambari-upgrade add="false" change="true" delete="true"/>
     <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
   <property>
     <name>atlas.audit.hbase.zookeeper.quorum</name>
-    <value>{{hbase_zookeeper_quorum}}</value>
+    <value></value>
     <description/>
     <on-ambari-upgrade add="false" change="true" delete="true"/>
     <on-stack-upgrade add="true" change="true" delete="false"/>
@@ -86,7 +86,7 @@
   </property>
   <property>
     <name>atlas.graph.index.search.solr.zookeeper-url</name>
-    <value>{{solr_zookeeper_url}}</value>
+    <value></value>
     <description>The ZooKeeper quorum setup for Solr as comma separated 
value.</description>
     <on-ambari-upgrade add="false" change="true" delete="true"/>
     <on-stack-upgrade add="true" change="true" delete="false"/>
@@ -121,14 +121,14 @@
   </property>
   <property>
     <name>atlas.kafka.bootstrap.servers</name>
-    <value>{{kafka_bootstrap_servers}}</value>
+    <value></value>
     <description>Comma separated list of Kafka broker endpoints in host:port 
form</description>
     <on-ambari-upgrade add="false" change="true" delete="true"/>
     <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
   <property>
     <name>atlas.kafka.zookeeper.connect</name>
-    <value>{{kafka_zookeeper_connect}}</value>
+    <value></value>
     <description>Comma separated list of servers forming Zookeeper quorum used 
by Kafka.</description>
     <on-ambari-upgrade add="false" change="true" delete="true"/>
     <on-stack-upgrade add="true" change="true" delete="false"/>

http://git-wip-us.apache.org/repos/asf/ambari/blob/8eddba90/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py 
b/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
index 4b1926a..6b5e2a1 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
@@ -81,11 +81,13 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
                                   "If KAFKA is not installed then the Kafka 
zookeeper quorum configuration must be specified.")})
 
     if application_properties['atlas.graph.storage.backend'] == 'hbase':
+      hbase_zookeeper_quorum = 
services['configurations']['hbase-site']['properties']['hbase.zookeeper.quorum']
+
       if not application_properties['atlas.graph.storage.hostname']:
         validationItems.append({"config-name": "atlas.graph.storage.hostname",
                                 "item": self.getErrorItem(
                                     "If HBASE is not installed then the hbase 
zookeeper quorum configuration must be specified.")})
-      elif application_properties['atlas.graph.storage.hostname'] == 
'{{hbase_zookeeper_quorum}}':
+      elif application_properties['atlas.graph.storage.hostname'] == 
hbase_zookeeper_quorum:
         validationItems.append({"config-name": "atlas.graph.storage.hostname",
                                 "item": self.getWarnItem(
                                     "Note that Atlas is configured to use the 
HBASE instance being installed for this cluster.")})
@@ -174,21 +176,98 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
 
     servicesList = [service["StackServices"]["service_name"] for service in 
services["services"]]
 
+    # HA
+    atlas_hosts = self.getHostNamesWithComponent("ATLAS", "ATLAS_SERVER", 
services)
+
+    if 'atlas.enableTLS' in 
services['configurations']['application-properties']['properties']:
+      ssl_enabled = 
services['configurations']['application-properties']['properties']['atlas.enableTLS']
+    else:
+      ssl_enabled = 'false'
+
+    if ssl_enabled.lower() == 'true':
+      if 'atlas.server.https.port' in 
services['configurations']['application-properties']['properties']:
+        metadata_port = 
services['configurations']['application-properties']['properties']['atlas.server.https.port']
+      else:
+        metadata_port = '21443'
+    else:
+      if 'atlas.server.http.port' in 
services['configurations']['application-properties']['properties']:
+        metadata_port = 
services['configurations']['application-properties']['properties']['atlas.server.http.port']
+      else:
+        metadata_port = '21000'
+
+    id = 1
+    server_ids = ""
+    server_hosts = ""
+
+    for host in atlas_hosts:
+      server_id = "id" + str(id)
+      server_host = host + ":" + metadata_port
+
+      if id > 1:
+        server_ids += ","
+        server_hosts += "\n" + "atlas.server.address." + server_id + "="
+
+      server_ids += server_id
+      server_hosts += server_host
+
+      id += 1
+
+    putAtlasApplicationProperty('atlas.server.ids', server_ids)
+    putAtlasApplicationProperty('atlas.server.address.id1', server_hosts)
+
     if "LOGSEARCH" in servicesList:
-      
putAtlasApplicationProperty('atlas.graph.index.search.solr.zookeeper-url', 
'{{solr_zookeeper_url}}')
+
+      if 'logsearch_solr_znode' in 
services['configurations']['logsearch-solr-env']['properties']:
+        logsearch_solr_znode = 
services['configurations']['logsearch-solr-env']['properties']['logsearch_solr_znode']
+      else:
+        logsearch_solr_znode = None
+
+      zookeeper_hosts = self.getHostNamesWithComponent("ZOOKEEPER", 
"ZOOKEEPER_SERVER", services)
+      zookeeper_host_arr = []
+
+      zookeeper_port = self.getZKPort(services)
+      for i in range(len(zookeeper_hosts)):
+        zookeeper_host = zookeeper_hosts[i] + ':' + zookeeper_port
+        if logsearch_solr_znode is not None:
+          zookeeper_host += logsearch_solr_znode
+        zookeeper_host_arr.append(zookeeper_host)
+
+      solr_zookeeper_url = ",".join(zookeeper_host_arr)
+
+      
putAtlasApplicationProperty('atlas.graph.index.search.solr.zookeeper-url', 
solr_zookeeper_url)
     else:
       
putAtlasApplicationProperty('atlas.graph.index.search.solr.zookeeper-url', "")
 
     if "KAFKA" in servicesList:
-      putAtlasApplicationProperty('atlas.kafka.bootstrap.servers', 
'{{kafka_bootstrap_servers}}')
-      putAtlasApplicationProperty('atlas.kafka.zookeeper.connect', 
'{{kafka_zookeeper_connect}}')
+
+      kafka_hosts = self.getHostNamesWithComponent("KAFKA", "KAFKA_BROKER", 
services)
+
+      if 'port' in services['configurations']['kafka-broker']['properties']:
+        kafka_broker_port = 
services['configurations']['kafka-broker']['properties']['port']
+      else:
+        kafka_broker_port = '6667'
+
+      kafka_host_arr = []
+      for i in range(len(kafka_hosts)):
+        kafka_host_arr.append(kafka_hosts[i] + ':' + kafka_broker_port)
+
+      kafka_bootstrap_servers = ",".join(kafka_host_arr)
+
+      if 'zookeeper.connect' in 
services['configurations']['kafka-broker']['properties']:
+        kafka_zookeeper_connect = 
services['configurations']['kafka-broker']['properties']['zookeeper.connect']
+      else:
+        kafka_zookeeper_connect = None
+
+      putAtlasApplicationProperty('atlas.kafka.bootstrap.servers', 
kafka_bootstrap_servers)
+      putAtlasApplicationProperty('atlas.kafka.zookeeper.connect', 
kafka_zookeeper_connect)
     else:
       putAtlasApplicationProperty('atlas.kafka.bootstrap.servers', "")
       putAtlasApplicationProperty('atlas.kafka.zookeeper.connect', "")
 
     if "HBASE" in servicesList:
-      putAtlasApplicationProperty('atlas.graph.storage.hostname', 
'{{hbase_zookeeper_quorum}}')
-      putAtlasApplicationProperty('atlas.audit.hbase.zookeeper.quorum', 
'{{hbase_zookeeper_quorum}}')
+      hbase_zookeeper_quorum = 
services['configurations']['hbase-site']['properties']['hbase.zookeeper.quorum']
+      putAtlasApplicationProperty('atlas.graph.storage.hostname', 
hbase_zookeeper_quorum)
+      putAtlasApplicationProperty('atlas.audit.hbase.zookeeper.quorum', 
hbase_zookeeper_quorum)
     else:
       putAtlasApplicationProperty('atlas.graph.storage.hostname', "")
       putAtlasApplicationProperty('atlas.audit.hbase.zookeeper.quorum', "")

http://git-wip-us.apache.org/repos/asf/ambari/blob/8eddba90/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
 
b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
index 21083ef..9ec0a09 100644
--- 
a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
+++ 
b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
@@ -4741,6 +4741,169 @@ public class BlueprintConfigurationProcessorTest {
   }
 
   @Test
+  public void testAtlas() throws Exception {
+    final String expectedHostGroupName = "host_group_1";
+    final String host1 = "c6401.ambari.apache.org";
+    final String host2 = "c6402.ambari.apache.org";
+    final String host3 = "c6403.ambari.apache.org";
+
+    Map<String, Map<String, String>> properties = new HashMap<String, 
Map<String, String>>();
+    Map<String, String> atlasProperties = new HashMap<String, String>();
+    properties.put("application-properties", atlasProperties);
+
+    // setup properties that include host information
+    atlasProperties.put("atlas.kafka.bootstrap.servers", "localhost:6667");
+    atlasProperties.put("atlas.kafka.zookeeper.connect", "localhost:2181");
+    atlasProperties.put("atlas.graph.index.search.solr.zookeeper-url", 
"localhost:2181/ambari-solr");
+    atlasProperties.put("atlas.graph.storage.hostname", "localhost");
+    atlasProperties.put("atlas.audit.hbase.zookeeper.quorum", "localhost");
+
+
+    Configuration clusterConfig = new Configuration(properties, 
Collections.<String, Map<String, Map<String, String>>>emptyMap());
+
+    Collection<String> hgComponents = new HashSet<String>();
+    hgComponents.add("KAFKA_BROKER");
+    hgComponents.add("ZOOKEEPER_SERVER");
+    hgComponents.add("HBASE_MASTER");
+    List<String> hosts = new ArrayList<String>();
+    hosts.add(host1);
+    hosts.add(host2);
+    hosts.add(host3);
+    TestHostGroup group1 = new TestHostGroup(expectedHostGroupName, 
hgComponents, hosts);
+
+    Collection<TestHostGroup> hostGroups = new HashSet<TestHostGroup>();
+    hostGroups.add(group1);
+
+    ClusterTopology topology = createClusterTopology(bp, clusterConfig, 
hostGroups);
+    BlueprintConfigurationProcessor updater = new 
BlueprintConfigurationProcessor(topology);
+
+    // call top-level cluster config update method
+    updater.doUpdateForClusterCreate();
+
+    List<String> hostArray =
+        
Arrays.asList(atlasProperties.get("atlas.kafka.bootstrap.servers").split(","));
+    List<String> expected =
+        
Arrays.asList("c6401.ambari.apache.org:6667","c6402.ambari.apache.org:6667", 
"c6403.ambari.apache.org:6667");
+
+    Assert.assertTrue(hostArray.containsAll(expected) && 
expected.containsAll(hostArray));
+
+    hostArray = 
Arrays.asList(atlasProperties.get("atlas.kafka.zookeeper.connect").split(","));
+    expected =
+        
Arrays.asList("c6401.ambari.apache.org:2181","c6402.ambari.apache.org:2181", 
"c6403.ambari.apache.org:2181");
+    Assert.assertTrue(hostArray.containsAll(expected) && 
expected.containsAll(hostArray));
+
+
+    hostArray = 
Arrays.asList(atlasProperties.get("atlas.graph.index.search.solr.zookeeper-url").split(","));
+    expected =
+        
Arrays.asList("c6401.ambari.apache.org:2181/ambari-solr","c6402.ambari.apache.org:2181/ambari-solr",
 "c6403.ambari.apache.org:2181/ambari-solr");
+    Assert.assertTrue(hostArray.containsAll(expected) && 
expected.containsAll(hostArray));
+
+    hostArray = 
Arrays.asList(atlasProperties.get("atlas.graph.storage.hostname").split(","));
+    expected =
+        Arrays.asList("c6401.ambari.apache.org","c6402.ambari.apache.org", 
"c6403.ambari.apache.org");
+    Assert.assertTrue(hostArray.containsAll(expected) && 
expected.containsAll(hostArray));
+
+    hostArray = 
Arrays.asList(atlasProperties.get("atlas.audit.hbase.zookeeper.quorum").split(","));
+    expected =
+        Arrays.asList("c6401.ambari.apache.org","c6402.ambari.apache.org", 
"c6403.ambari.apache.org");
+    Assert.assertTrue(hostArray.containsAll(expected) && 
expected.containsAll(hostArray));
+  }
+
+  @Test
+  public void testAtlasHA() throws Exception {
+    final String expectedHostGroupName = "host_group_1";
+    final String host1 = "c6401.ambari.apache.org";
+    final String host2 = "c6402.ambari.apache.org";
+    final String host3 = "c6403.ambari.apache.org";
+
+    Map<String, Map<String, String>> properties = new HashMap<String, 
Map<String, String>>();
+    Map<String, String> atlasProperties = new HashMap<String, String>();
+    properties.put("application-properties", atlasProperties);
+
+    // setup properties that include host information
+    atlasProperties.put("atlas.server.ids", "");
+    atlasProperties.put("atlas.server.address.id1", "");
+    atlasProperties.put("atlas.server.http.port", "21000");
+    atlasProperties.put("atlas.server.https.port", "21443");
+    atlasProperties.put("atlas.enableTLS", "false");
+
+    Configuration clusterConfig = new Configuration(properties, 
Collections.<String, Map<String, Map<String, String>>>emptyMap());
+
+    Collection<String> hgComponents = new HashSet<String>();
+    hgComponents.add("ATLAS_SERVER");
+    List<String> hosts = new ArrayList<String>();
+    hosts.add(host1);
+    hosts.add(host2);
+    hosts.add(host3);
+    TestHostGroup group1 = new TestHostGroup(expectedHostGroupName, 
hgComponents, hosts);
+
+    Collection<TestHostGroup> hostGroups = new HashSet<TestHostGroup>();
+    hostGroups.add(group1);
+
+    ClusterTopology topology = createClusterTopology(bp, clusterConfig, 
hostGroups);
+    BlueprintConfigurationProcessor updater = new 
BlueprintConfigurationProcessor(topology);
+
+    // call top-level cluster config update method
+    updater.doUpdateForClusterCreate();
+
+    assertEquals("id1,id2,id3", atlasProperties.get("atlas.server.ids"));
+
+    List<String> hostArray =
+        
Arrays.asList(atlasProperties.get("atlas.server.address.id1").split("\natlas.server.address.id.="));
+    List<String> expected =
+        
Arrays.asList("c6401.ambari.apache.org:21000","c6402.ambari.apache.org:21000", 
"c6403.ambari.apache.org:21000");
+
+    Assert.assertTrue(hostArray.containsAll(expected) && 
expected.containsAll(hostArray));
+  }
+
+  @Test
+  public void testAtlasHAEnableTLS() throws Exception {
+    final String expectedHostGroupName = "host_group_1";
+    final String host1 = "c6401.ambari.apache.org";
+    final String host2 = "c6402.ambari.apache.org";
+    final String host3 = "c6403.ambari.apache.org";
+
+    Map<String, Map<String, String>> properties = new HashMap<String, 
Map<String, String>>();
+    Map<String, String> atlasProperties = new HashMap<String, String>();
+    properties.put("application-properties", atlasProperties);
+
+    // setup properties that include host information
+    atlasProperties.put("atlas.server.ids", "");
+    atlasProperties.put("atlas.server.address.id1", "");
+    atlasProperties.put("atlas.server.http.port", "21000");
+    atlasProperties.put("atlas.server.https.port", "21443");
+    atlasProperties.put("atlas.enableTLS", "true");
+
+    Configuration clusterConfig = new Configuration(properties, 
Collections.<String, Map<String, Map<String, String>>>emptyMap());
+
+    Collection<String> hgComponents = new HashSet<String>();
+    hgComponents.add("ATLAS_SERVER");
+    List<String> hosts = new ArrayList<String>();
+    hosts.add(host1);
+    hosts.add(host2);
+    hosts.add(host3);
+    TestHostGroup group1 = new TestHostGroup(expectedHostGroupName, 
hgComponents, hosts);
+
+    Collection<TestHostGroup> hostGroups = new HashSet<TestHostGroup>();
+    hostGroups.add(group1);
+
+    ClusterTopology topology = createClusterTopology(bp, clusterConfig, 
hostGroups);
+    BlueprintConfigurationProcessor updater = new 
BlueprintConfigurationProcessor(topology);
+
+    // call top-level cluster config update method
+    updater.doUpdateForClusterCreate();
+
+    assertEquals("id1,id2,id3", atlasProperties.get("atlas.server.ids"));
+
+    List<String> hostArray =
+        
Arrays.asList(atlasProperties.get("atlas.server.address.id1").split("\natlas.server.address.id.="));
+    List<String> expected =
+        
Arrays.asList("c6401.ambari.apache.org:21443","c6402.ambari.apache.org:21443", 
"c6403.ambari.apache.org:21443");
+
+    Assert.assertTrue(hostArray.containsAll(expected) && 
expected.containsAll(hostArray));
+  }
+
+  @Test
   public void testHiveConfigClusterUpdateExportedHostGroupValue() throws 
Exception {
     final String expectedHostGroupName = "host_group_1";
     final String expectedHostName = "c6401.ambari.apache.org";

http://git-wip-us.apache.org/repos/asf/ambari/blob/8eddba90/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py 
b/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
index 74297de..22b3c52 100644
--- a/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
@@ -5670,12 +5670,6 @@ class TestHDP25StackAdvisor(TestCase):
                       
self.expected_hive_interactive_site_default['hive-interactive-site']['properties']['hive.llap.daemon.queue.name'])
     
self.assertEquals(configurations['hive-interactive-site']['properties']['hive.server2.tez.default.queues'],
 'default')
 
-
-
-
-
-
-
   def test_recommendAtlasConfigurations(self):
     self.maxDiff = None
     configurations = {
@@ -5705,11 +5699,13 @@ class TestHDP25StackAdvisor(TestCase):
     expected = {
       'application-properties': {
         'properties': {
-          'atlas.graph.index.search.solr.zookeeper-url': 
'{{solr_zookeeper_url}}',
-          "atlas.audit.hbase.zookeeper.quorum": "",
-          "atlas.graph.storage.hostname": "",
-          "atlas.kafka.bootstrap.servers": "",
-          "atlas.kafka.zookeeper.connect": ""
+          'atlas.graph.index.search.solr.zookeeper-url': 
'c6401.ambari.apache.org:2181/logsearch',
+          "atlas.audit.hbase.zookeeper.quorum": "c6401.ambari.apache.org",
+          "atlas.graph.storage.hostname": "c6401.ambari.apache.org",
+          "atlas.kafka.bootstrap.servers": "c6401.ambari.apache.org:6667",
+          "atlas.kafka.zookeeper.connect": "c6401.ambari.apache.org",
+          'atlas.server.address.id1': "c6401.ambari.apache.org:21000",
+          'atlas.server.ids': "id1"
         }
       },
       "logsearch-solr-env": {
@@ -5721,7 +5717,7 @@ class TestHDP25StackAdvisor(TestCase):
     services = {
       "services": [
         {
-          "href": "/api/v1/stacks/HDP/versions/2.2/services/ATLAS",
+          "href": "/api/v1/stacks/HDP/versions/2.2/services/LOGSEARCH",
           "StackServices": {
             "service_name": "LOGSEARCH",
             "service_version": "2.6.0.2.2",
@@ -5738,7 +5734,103 @@ class TestHDP25StackAdvisor(TestCase):
                 "display_name": "solr",
                 "is_client": "false",
                 "is_master": "true",
-                "hostnames": []
+                "hostnames": ["c6401.ambari.apache.org"]
+              },
+              "dependencies": []
+            }
+          ]
+        },
+        {
+          "href": "/api/v1/stacks/HDP/versions/2.2/services/ZOOKEEPER",
+          "StackServices": {
+            "service_name": "ZOOKEEPER",
+            "service_version": "2.6.0.2.2",
+            "stack_name": "HDP",
+            "stack_version": "2.3"
+          },
+          "components": [
+            {
+              "StackServiceComponents": {
+                "advertise_version": "false",
+                "cardinality": "1",
+                "component_category": "MASTER",
+                "component_name": "ZOOKEEPER_SERVER",
+                "display_name": "zk",
+                "is_client": "false",
+                "is_master": "true",
+                "hostnames": ["c6401.ambari.apache.org"]
+              },
+              "dependencies": []
+            }
+          ]
+        },
+        {
+          "href": "/api/v1/stacks/HDP/versions/2.2/services/HBASE",
+          "StackServices": {
+            "service_name": "HBASE",
+            "service_version": "2.6.0.2.2",
+            "stack_name": "HDP",
+            "stack_version": "2.3"
+          },
+          "components": [
+            {
+              "StackServiceComponents": {
+                "advertise_version": "false",
+                "cardinality": "1",
+                "component_category": "MASTER",
+                "component_name": "HBASE_MASTER",
+                "display_name": "zk",
+                "is_client": "false",
+                "is_master": "true",
+                "hostnames": ["c6401.ambari.apache.org"]
+              },
+              "dependencies": []
+            }
+          ]
+        },
+        {
+          "href": "/api/v1/stacks/HDP/versions/2.2/services/ATLAS",
+          "StackServices": {
+            "service_name": "ATLAS",
+            "service_version": "2.6.0.2.2",
+            "stack_name": "HDP",
+            "stack_version": "2.3"
+          },
+          "components": [
+            {
+              "StackServiceComponents": {
+                "advertise_version": "false",
+                "cardinality": "1",
+                "component_category": "MASTER",
+                "component_name": "ATLAS_SERVER",
+                "display_name": "atlas",
+                "is_client": "false",
+                "is_master": "true",
+                "hostnames": ["c6401.ambari.apache.org"]
+              },
+              "dependencies": []
+            }
+          ]
+        },
+        {
+          "href": "/api/v1/stacks/HDP/versions/2.2/services/KAFKA",
+          "StackServices": {
+            "service_name": "KAFKA",
+            "service_version": "2.6.0.2.2",
+            "stack_name": "HDP",
+            "stack_version": "2.3"
+          },
+          "components": [
+            {
+              "StackServiceComponents": {
+                "advertise_version": "false",
+                "cardinality": "1",
+                "component_category": "MASTER",
+                "component_name": "KAFKA_BROKER",
+                "display_name": "atlas",
+                "is_client": "false",
+                "is_master": "true",
+                "hostnames": ["c6401.ambari.apache.org"]
               },
               "dependencies": []
             }
@@ -5752,13 +5844,26 @@ class TestHDP25StackAdvisor(TestCase):
             "atlas.audit.hbase.zookeeper.quorum": "",
             "atlas.graph.storage.hostname": "",
             "atlas.kafka.bootstrap.servers": "",
-            "atlas.kafka.zookeeper.connect": ""
+            "atlas.kafka.zookeeper.connect": "",
+            'atlas.server.address.id1': "",
+            'atlas.server.ids': ""
           }
         },
         "logsearch-solr-env": {
           "properties": {
             "logsearch_solr_znode": "/logsearch"
           }
+        },
+        "hbase-site": {
+          "properties": {
+            "hbase.zookeeper.quorum": "c6401.ambari.apache.org"
+          }
+        },
+        "kafka-broker": {
+          "properties": {
+            "zookeeper.connect": "c6401.ambari.apache.org",
+            "port": "6667"
+          }
         }
       },
       "changed-configurations": [ ]

Reply via email to