Repository: ambari
Updated Branches:
  refs/heads/trunk d8e8a0915 -> ce69d0077


AMBARI-8021. Adds Blueprint export handling for Knox and ZooKeeper properties in the HDP 2.2 stack


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/ce69d007
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/ce69d007
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/ce69d007

Branch: refs/heads/trunk
Commit: ce69d0077c9f45f9ad0824a40352771604b3b17a
Parents: d8e8a09
Author: Robert Nettleton <rnettle...@hortonworks.com>
Authored: Thu Oct 30 11:52:11 2014 -0400
Committer: John Speidel <jspei...@hortonworks.com>
Committed: Thu Oct 30 13:39:08 2014 -0400

----------------------------------------------------------------------
 .../BlueprintConfigurationProcessor.java        |  25 ++++
 .../BlueprintConfigurationProcessorTest.java    | 137 +++++++++++++++++++
 2 files changed, 162 insertions(+)
----------------------------------------------------------------------

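For context before the diff itself: during blueprint export, host-specific configuration values are rewritten so they reference host groups instead of concrete host names. The sketch below is an illustrative, standalone approximation of that substitution only; the class and method names are invented for the example, while the actual behaviour in this patch comes from the MultipleHostTopologyUpdater/SingleHostTopologyUpdater registrations added to BlueprintConfigurationProcessor in the diff that follows.

import java.util.HashMap;
import java.util.Map;

/**
 * Illustrative sketch only -- not part of this patch.  It approximates what the
 * MultipleHostTopologyUpdater registrations below do during blueprint export:
 * each "host[:port]" entry in a multi-host property is replaced with a
 * "%HOSTGROUP::<group>%[:port]" token so the exported blueprint stays portable.
 */
public class ZkQuorumExportSketch {

  /** Replace every host in a comma-separated "host[:port]" list with its host-group token. */
  static String exportHostList(String value, Map<String, String> hostToGroup) {
    StringBuilder exported = new StringBuilder();
    for (String entry : value.split(",")) {
      String[] hostAndPort = entry.trim().split(":");
      if (exported.length() > 0) {
        exported.append(",");
      }
      exported.append("%HOSTGROUP::").append(hostToGroup.get(hostAndPort[0])).append("%");
      if (hostAndPort.length > 1) {
        exported.append(":").append(hostAndPort[1]);  // ports are preserved, as the new tests assert
      }
    }
    return exported.toString();
  }

  public static void main(String[] args) {
    Map<String, String> hostToGroup = new HashMap<String, String>();
    hostToGroup.put("c6401.ambari.apache.org", "host_group_1");
    hostToGroup.put("c6402.ambari.apache.org", "host_group_2");

    // Mirrors the expectation for hadoop.registry.zk.quorum, slider.zookeeper.quorum and zookeeper.connect:
    System.out.println(exportHostList(
        "c6401.ambari.apache.org:2112,c6402.ambari.apache.org:1221", hostToGroup));
    // prints: %HOSTGROUP::host_group_1%:2112,%HOSTGROUP::host_group_2%:1221
  }
}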

http://git-wip-us.apache.org/repos/asf/ambari/blob/ce69d007/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
index be379c9..9237bc9 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
@@ -882,6 +882,8 @@ public class BlueprintConfigurationProcessor {
     Map<String, PropertyUpdater> oozieSiteMap = new HashMap<String, PropertyUpdater>();
     Map<String, PropertyUpdater> stormSiteMap = new HashMap<String, PropertyUpdater>();
     Map<String, PropertyUpdater> falconStartupPropertiesMap = new HashMap<String, PropertyUpdater>();
+    Map<String, PropertyUpdater> kafkaBrokerMap = new HashMap<String, PropertyUpdater>();
+
 
 
     Map<String, PropertyUpdater> mapredEnvMap = new HashMap<String, PropertyUpdater>();
@@ -898,6 +900,13 @@ public class BlueprintConfigurationProcessor {
     Map<String, PropertyUpdater> multiCoreSiteMap = new HashMap<String, PropertyUpdater>();
     Map<String, PropertyUpdater> multiHdfsSiteMap = new HashMap<String, PropertyUpdater>();
     Map<String, PropertyUpdater> multiHiveSiteMap = new HashMap<String, PropertyUpdater>();
+    Map<String, PropertyUpdater> multiKafkaBrokerMap = new HashMap<String, PropertyUpdater>();
+    Map<String, PropertyUpdater> multiSliderClientMap = new HashMap<String, PropertyUpdater>();
+    Map<String, PropertyUpdater> multiYarnSiteMap = new HashMap<String, PropertyUpdater>();
+    Map<String, PropertyUpdater> multiOozieSiteMap = new HashMap<String, PropertyUpdater>();
+
+
+
 
 
     Map<String, PropertyUpdater> dbHiveSiteMap = new HashMap<String, PropertyUpdater>();
@@ -917,6 +926,7 @@ public class BlueprintConfigurationProcessor {
     singleHostTopologyUpdaters.put("nagios-env", nagiosEnvMap);
     singleHostTopologyUpdaters.put("hive-env", hiveEnvMap);
     singleHostTopologyUpdaters.put("oozie-env", oozieEnvMap);
+    singleHostTopologyUpdaters.put("kafka-broker", kafkaBrokerMap);
 
     mPropertyUpdaters.put("hadoop-env", hadoopEnvMap);
     mPropertyUpdaters.put("hbase-env", hbaseEnvMap);
@@ -928,6 +938,10 @@ public class BlueprintConfigurationProcessor {
     multiHostTopologyUpdaters.put("core-site", multiCoreSiteMap);
     multiHostTopologyUpdaters.put("hdfs-site", multiHdfsSiteMap);
     multiHostTopologyUpdaters.put("hive-site", multiHiveSiteMap);
+    multiHostTopologyUpdaters.put("kafka-broker", multiKafkaBrokerMap);
+    multiHostTopologyUpdaters.put("slider-client", multiSliderClientMap);
+    multiHostTopologyUpdaters.put("yarn-site", multiYarnSiteMap);
+    multiHostTopologyUpdaters.put("oozie-site", multiOozieSiteMap);
 
     dbHostTopologyUpdaters.put("hive-site", dbHiveSiteMap);
 
@@ -1000,6 +1014,9 @@ public class BlueprintConfigurationProcessor {
     multiHbaseSiteMap.put("hbase.zookeeper.quorum", new MultipleHostTopologyUpdater("ZOOKEEPER_SERVER"));
     multiWebhcatSiteMap.put("templeton.zookeeper.hosts", new MultipleHostTopologyUpdater("ZOOKEEPER_SERVER"));
     multiCoreSiteMap.put("ha.zookeeper.quorum", new MultipleHostTopologyUpdater("ZOOKEEPER_SERVER"));
+    multiYarnSiteMap.put("hadoop.registry.zk.quorum", new MultipleHostTopologyUpdater("ZOOKEEPER_SERVER"));
+    multiSliderClientMap.put("slider.zookeeper.quorum", new MultipleHostTopologyUpdater("ZOOKEEPER_SERVER"));
+    multiKafkaBrokerMap.put("zookeeper.connect", new MultipleHostTopologyUpdater("ZOOKEEPER_SERVER"));
 
     // STORM
     stormSiteMap.put("nimbus.host", new SingleHostTopologyUpdater("NIMBUS"));
@@ -1018,6 +1035,14 @@ public class BlueprintConfigurationProcessor {
     // NAGIOS
     nagiosEnvMap.put("nagios_principal_name", new SingleHostTopologyUpdater("NAGIOS_SERVER"));
 
+    // KAFKA
+    kafkaBrokerMap.put("kafka.ganglia.metrics.host", new SingleHostTopologyUpdater("GANGLIA_SERVER"));
+
+    // KNOX
+    multiCoreSiteMap.put("hadoop.proxyuser.knox.hosts", new MultipleHostTopologyUpdater("KNOX_GATEWAY"));
+    multiWebhcatSiteMap.put("webhcat.proxyuser.knox.hosts", new MultipleHostTopologyUpdater("KNOX_GATEWAY"));
+    multiOozieSiteMap.put("hadoop.proxyuser.knox.hosts", new MultipleHostTopologyUpdater("KNOX_GATEWAY"));
+
 
     // Required due to AMBARI-4933.  These no longer seem to be required as the default values in the stack
     // are now correct but are left here in case an existing blueprint still contains an old value.

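Put concretely (the values here mirror the new unit tests below rather than any real cluster): with the registrations above, exporting a blueprint turns a core-site hadoop.proxyuser.knox.hosts value such as "hostA,hostB" into "%HOSTGROUP::host_group_1%,%HOSTGROUP::host_group_2%" (assuming hostA and hostB belong to those host groups), and a kafka-broker kafka.ganglia.metrics.host value such as "hostA:2112" into "%HOSTGROUP::host_group_1%:2112", with the port preserved.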
http://git-wip-us.apache.org/repos/asf/ambari/blob/ce69d007/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
index cf06064..6aeb2ab 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
@@ -2011,6 +2011,8 @@ public class BlueprintConfigurationProcessorTest {
     final String expectedHostNameTwo = "c6402.ambari.apache.org";
     final String expectedHostGroupName = "host_group_1";
     final String expectedHostGroupNameTwo = "host_group_2";
+    final String expectedPortNumberOne = "2112";
+    final String expectedPortNumberTwo = "1221";
 
     EasyMockSupport mockSupport = new EasyMockSupport();
 
@@ -2033,14 +2035,29 @@ public class BlueprintConfigurationProcessorTest {
       new HashMap<String, String>();
     Map<String, String> webHCatSiteProperties =
       new HashMap<String, String>();
+    Map<String, String> sliderClientProperties =
+      new HashMap<String, String>();
+    Map<String, String> yarnSiteProperties =
+      new HashMap<String, String>();
+    Map<String, String> kafkaBrokerProperties =
+      new HashMap<String, String>();
+
+
 
     configProperties.put("core-site", coreSiteProperties);
     configProperties.put("hbase-site", hbaseSiteProperties);
     configProperties.put("webhcat-site", webHCatSiteProperties);
+    configProperties.put("slider-client", sliderClientProperties);
+    configProperties.put("yarn-site", yarnSiteProperties);
+    configProperties.put("kafka-broker", kafkaBrokerProperties);
 
     coreSiteProperties.put("ha.zookeeper.quorum", expectedHostName + "," + expectedHostNameTwo);
     hbaseSiteProperties.put("hbase.zookeeper.quorum", expectedHostName + "," + expectedHostNameTwo);
     webHCatSiteProperties.put("templeton.zookeeper.hosts", expectedHostName + "," + expectedHostNameTwo);
+    yarnSiteProperties.put("hadoop.registry.zk.quorum", createHostAddress(expectedHostName, expectedPortNumberOne) + "," + createHostAddress(expectedHostNameTwo, expectedPortNumberTwo));
+    sliderClientProperties.put("slider.zookeeper.quorum", createHostAddress(expectedHostName, expectedPortNumberOne) + "," + createHostAddress(expectedHostNameTwo, expectedPortNumberTwo));
+    kafkaBrokerProperties.put("zookeeper.connect", createHostAddress(expectedHostName, expectedPortNumberOne) + "," + createHostAddress(expectedHostNameTwo, expectedPortNumberTwo));
+
 
     BlueprintConfigurationProcessor configProcessor =
       new BlueprintConfigurationProcessor(configProperties);
@@ -2057,6 +2074,117 @@ public class BlueprintConfigurationProcessorTest {
     assertEquals("zookeeper config not properly exported",
       createExportedHostName(expectedHostGroupName) + "," + 
createExportedHostName(expectedHostGroupNameTwo),
       webHCatSiteProperties.get("templeton.zookeeper.hosts"));
+    assertEquals("yarn-site zookeeper config not properly exported",
+      createExportedHostName(expectedHostGroupName, expectedPortNumberOne) + 
"," + createExportedHostName(expectedHostGroupNameTwo, expectedPortNumberTwo),
+      yarnSiteProperties.get("hadoop.registry.zk.quorum"));
+    assertEquals("slider-client zookeeper config not properly exported",
+      createExportedHostName(expectedHostGroupName, expectedPortNumberOne) + 
"," + createExportedHostName(expectedHostGroupNameTwo, expectedPortNumberTwo),
+      sliderClientProperties.get("slider.zookeeper.quorum"));
+    assertEquals("kafka zookeeper config not properly exported",
+      createExportedHostName(expectedHostGroupName, expectedPortNumberOne) + 
"," + createExportedHostName(expectedHostGroupNameTwo, expectedPortNumberTwo),
+      kafkaBrokerProperties.get("zookeeper.connect"));
+
+    mockSupport.verifyAll();
+
+  }
+
+  @Test
+  public void testKnoxSecurityConfigExported() throws Exception {
+    final String expectedHostName = "c6401.apache.ambari.org";
+    final String expectedHostNameTwo = "c6402.ambari.apache.org";
+    final String expectedHostGroupName = "host_group_1";
+    final String expectedHostGroupNameTwo = "host_group_2";
+
+    EasyMockSupport mockSupport = new EasyMockSupport();
+
+    HostGroup mockHostGroupOne = mockSupport.createMock(HostGroup.class);
+    HostGroup mockHostGroupTwo = mockSupport.createMock(HostGroup.class);
+
+    expect(mockHostGroupOne.getHostInfo()).andReturn(Arrays.asList(expectedHostName, "serverTwo")).atLeastOnce();
+    expect(mockHostGroupTwo.getHostInfo()).andReturn(Arrays.asList(expectedHostNameTwo, "serverTwo")).atLeastOnce();
+    expect(mockHostGroupOne.getName()).andReturn(expectedHostGroupName).atLeastOnce();
+    expect(mockHostGroupTwo.getName()).andReturn(expectedHostGroupNameTwo).atLeastOnce();
+
+    mockSupport.replayAll();
+
+    Map<String, Map<String, String>> configProperties =
+      new HashMap<String, Map<String, String>>();
+
+    Map<String, String> coreSiteProperties =
+      new HashMap<String, String>();
+    Map<String, String> webHCatSiteProperties =
+      new HashMap<String, String>();
+    Map<String, String> oozieSiteProperties =
+      new HashMap<String, String>();
+
+    configProperties.put("core-site", coreSiteProperties);
+    configProperties.put("webhcat-site", webHCatSiteProperties);
+    configProperties.put("oozie-site", oozieSiteProperties);
+
+    coreSiteProperties.put("hadoop.proxyuser.knox.hosts", expectedHostName + "," + expectedHostNameTwo);
+    webHCatSiteProperties.put("webhcat.proxyuser.knox.hosts", expectedHostName + "," + expectedHostNameTwo);
+    oozieSiteProperties.put("hadoop.proxyuser.knox.hosts", expectedHostName + "," + expectedHostNameTwo);
+
+//    multiCoreSiteMap.put("hadoop.proxyuser.knox.hosts", new MultipleHostTopologyUpdater("KNOX_GATEWAY"));
+//    multiWebhcatSiteMap.put("webhcat.proxyuser.knox.hosts", new MultipleHostTopologyUpdater("KNOX_GATEWAY"));
+//    multiOozieSiteMap.put("hadoop.proxyuser.knox.hosts", new MultipleHostTopologyUpdater("KNOX_GATEWAY"));
+
+    BlueprintConfigurationProcessor configProcessor =
+      new BlueprintConfigurationProcessor(configProperties);
+
+    // call top-level export method
+    configProcessor.doUpdateForBlueprintExport(Arrays.asList(mockHostGroupOne, mockHostGroupTwo));
+
+    assertEquals("Knox for core-site config not properly exported",
+      createExportedHostName(expectedHostGroupName) + "," + 
createExportedHostName(expectedHostGroupNameTwo),
+      coreSiteProperties.get("hadoop.proxyuser.knox.hosts"));
+    assertEquals("Knox config for WebHCat not properly exported",
+      createExportedHostName(expectedHostGroupName) + "," + 
createExportedHostName(expectedHostGroupNameTwo),
+      webHCatSiteProperties.get("webhcat.proxyuser.knox.hosts"));
+    assertEquals("Knox config for Oozie not properly exported",
+      createExportedHostName(expectedHostGroupName) + "," + 
createExportedHostName(expectedHostGroupNameTwo),
+      oozieSiteProperties.get("hadoop.proxyuser.knox.hosts"));
+
+    mockSupport.verifyAll();
+
+  }
+
+  @Test
+  public void testKafkaConfigExported() throws Exception {
+    final String expectedHostName = "c6401.apache.ambari.org";
+    final String expectedHostGroupName = "host_group_1";
+    final String expectedPortNumberOne = "2112";
+
+    EasyMockSupport mockSupport = new EasyMockSupport();
+
+    HostGroup mockHostGroupOne = mockSupport.createMock(HostGroup.class);
+    HostGroup mockHostGroupTwo = mockSupport.createMock(HostGroup.class);
+
+    expect(mockHostGroupOne.getHostInfo()).andReturn(Arrays.asList(expectedHostName, "serverTwo")).atLeastOnce();
+    expect(mockHostGroupOne.getName()).andReturn(expectedHostGroupName).atLeastOnce();
+
+    mockSupport.replayAll();
+
+    Map<String, Map<String, String>> configProperties =
+      new HashMap<String, Map<String, String>>();
+
+    Map<String, String> kafkaBrokerProperties =
+      new HashMap<String, String>();
+
+    configProperties.put("kafka-broker", kafkaBrokerProperties);
+
+    kafkaBrokerProperties.put("kafka.ganglia.metrics.host", createHostAddress(expectedHostName, expectedPortNumberOne));
+
+
+    BlueprintConfigurationProcessor configProcessor =
+      new BlueprintConfigurationProcessor(configProperties);
+
+    // call top-level export method
+    configProcessor.doUpdateForBlueprintExport(Arrays.asList(mockHostGroupOne, mockHostGroupTwo));
+
+    assertEquals("kafka Ganglia config not properly exported",
+      createExportedHostName(expectedHostGroupName, expectedPortNumberOne),
+      kafkaBrokerProperties.get("kafka.ganglia.metrics.host"));
 
     mockSupport.verifyAll();
 
@@ -2104,10 +2232,19 @@ public class BlueprintConfigurationProcessorTest {
     return createExportedHostName(expectedHostGroupName) + ":" + expectedPortNum;
   }
 
+  private static String createExportedHostName(String expectedHostGroupName, String expectedPortNumber) {
+    return createExportedHostName(expectedHostGroupName) + ":" + expectedPortNumber;
+  }
+
+
   private static String createExportedHostName(String expectedHostGroupName) {
     return "%HOSTGROUP::" + expectedHostGroupName + "%";
   }
 
+  private static String createHostAddress(String hostName, String portNumber) {
+    return hostName + ":" + portNumber;
+  }
+
   private class TestHostGroup implements HostGroup {
 
     private String name;
