Repository: ambari
Updated Branches:
  refs/heads/trunk 763093b4b -> 7fa34c718


AMBARI-16822. Hostname substitutions for Namenode HA Initial State properties do not work. (Balazs Bence Sari via stoader)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/7fa34c71
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/7fa34c71
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/7fa34c71

Branch: refs/heads/trunk
Commit: 7fa34c7189dd14c410df162d9579934002b9ba84
Parents: 763093b
Author: Balazs Bence Sari <bs...@hortonworks.com>
Authored: Wed May 25 12:37:24 2016 +0200
Committer: Toader, Sebastian <stoa...@hortonworks.com>
Committed: Wed May 25 12:37:51 2016 +0200

----------------------------------------------------------------------
 .../BlueprintConfigurationProcessor.java        |  27 ++--
 .../BlueprintConfigurationProcessorTest.java    | 133 ++++++++++++++++++-
 2 files changed, 143 insertions(+), 17 deletions(-)
----------------------------------------------------------------------
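In short, the change registers the hadoop-env properties dfs_ha_initial_namenode_active and dfs_ha_initial_namenode_standby with a SingleHostTopologyUpdater for the NAMENODE component, so %HOSTGROUP::name% tokens in those values are replaced with concrete hostnames during cluster creation, and it adds export filters so the resolved hostnames are stripped again on blueprint export. A minimal, self-contained sketch of the token-substitution idea follows; the class, helper name, and regex below are illustrative assumptions, not the actual Ambari updater API.

import java.util.HashMap;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Simplified illustration of %HOSTGROUP::name% substitution for the NameNode HA
// initial-state properties. HostGroupTokenDemo and resolveHostGroupToken are
// hypothetical; the real resolution goes through ClusterTopology and the
// PropertyUpdater classes in BlueprintConfigurationProcessor.
public class HostGroupTokenDemo {

  private static final Pattern HOSTGROUP_TOKEN = Pattern.compile("%HOSTGROUP::([^%]+)%");

  static String resolveHostGroupToken(String value, Map<String, String> hostByGroup) {
    Matcher m = HOSTGROUP_TOKEN.matcher(value);
    StringBuffer resolved = new StringBuffer();
    while (m.find()) {
      // Replace each token with the (single) host mapped to that host group.
      m.appendReplacement(resolved, Matcher.quoteReplacement(hostByGroup.get(m.group(1))));
    }
    m.appendTail(resolved);
    return resolved.toString();
  }

  public static void main(String[] args) {
    Map<String, String> hostByGroup = new HashMap<>();
    hostByGroup.put("master_1", "node_1");
    hostByGroup.put("master_2", "node_2");

    // Mirrors the expectation in the new testHadoopHaNameNode test below:
    // the tokens resolve to the hosts of the mapped host groups.
    System.out.println(resolveHostGroupToken("%HOSTGROUP::master_1%", hostByGroup)); // node_1
    System.out.println(resolveHostGroupToken("%HOSTGROUP::master_2%", hostByGroup)); // node_2
  }
}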


http://git-wip-us.apache.org/repos/asf/ambari/blob/7fa34c71/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
index 3c3d8e5..4e404bc 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
@@ -169,6 +169,8 @@ public class BlueprintConfigurationProcessor {
       new SimplePropertyNameExportFilter("ldap-url", "kerberos-env"),
       new SimplePropertyNameExportFilter("container_dn", "kerberos-env"),
       new SimplePropertyNameExportFilter("domains", "krb5-conf"),
+      new SimplePropertyNameExportFilter("dfs_ha_initial_namenode_active", "hadoop-env"),
+      new SimplePropertyNameExportFilter("dfs_ha_initial_namenode_standby", "hadoop-env"),
       new StackPasswordPropertyFilter()
     };
 
@@ -240,7 +242,7 @@ public class BlueprintConfigurationProcessor {
    * @return Set of config type names that were updated by this update call
    */
   public Set<String> doUpdateForClusterCreate() throws ConfigurationTopologyException {
-    Set<String> configTypesUpdated = new HashSet<String>();
+      Set<String> configTypesUpdated = new HashSet<String>();
     Configuration clusterConfig = clusterTopology.getConfiguration();
     Map<String, HostGroupInfo> groupInfoMap = clusterTopology.getHostGroupInfo();
 
@@ -2235,7 +2237,8 @@ public class BlueprintConfigurationProcessor {
     Map<String, PropertyUpdater> kafkaBrokerNonTopologyMap = new HashMap<String, PropertyUpdater>();
     Map<String, PropertyUpdater> atlasPropsMap = new HashMap<String, PropertyUpdater>();
     Map<String, PropertyUpdater> mapredEnvMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> hadoopEnvMap = new HashMap<String, PropertyUpdater>();
+    Map<String, PropertyUpdater> mHadoopEnvMap = new HashMap<String, PropertyUpdater>();
+    Map<String, PropertyUpdater> shHadoopEnvMap = new HashMap<String, PropertyUpdater>();
     Map<String, PropertyUpdater> hbaseEnvMap = new HashMap<String, PropertyUpdater>();
     Map<String, PropertyUpdater> hiveEnvMap = new HashMap<String, PropertyUpdater>();
     Map<String, PropertyUpdater> hiveInteractiveEnvMap = new HashMap<String, PropertyUpdater>();
@@ -2268,8 +2271,6 @@ public class BlueprintConfigurationProcessor {
     Map<String, PropertyUpdater> hawqSiteMap = new HashMap<String, PropertyUpdater>();
     Map<String, PropertyUpdater> zookeeperEnvMap = new HashMap<String, PropertyUpdater>();
 
-
-
     singleHostTopologyUpdaters.put("ams-site", amsSiteMap);
     singleHostTopologyUpdaters.put("hdfs-site", hdfsSiteMap);
     singleHostTopologyUpdaters.put("mapred-site", mapredSiteMap);
@@ -2295,12 +2296,13 @@ public class BlueprintConfigurationProcessor {
     singleHostTopologyUpdaters.put("ranger-knox-audit", 
rangerKnoxAuditPropsMap);
     singleHostTopologyUpdaters.put("ranger-kafka-audit", 
rangerKafkaAuditPropsMap);
     singleHostTopologyUpdaters.put("ranger-storm-audit", 
rangerStormAuditPropsMap);
+    singleHostTopologyUpdaters.put("hadoop-env", shHadoopEnvMap);
 
     singleHostTopologyUpdaters.put("hawq-site", hawqSiteMap);
     singleHostTopologyUpdaters.put("zookeeper-env", zookeeperEnvMap);
 
 
-    mPropertyUpdaters.put("hadoop-env", hadoopEnvMap);
+    mPropertyUpdaters.put("hadoop-env", mHadoopEnvMap);
     mPropertyUpdaters.put("hbase-env", hbaseEnvMap);
     mPropertyUpdaters.put("mapred-env", mapredEnvMap);
     mPropertyUpdaters.put("oozie-env", oozieEnvHeapSizeMap);
@@ -2345,6 +2347,9 @@ public class BlueprintConfigurationProcessor {
     // HDFS shared.edits JournalNode Quorum URL uses semi-colons as separators
     multiHdfsSiteMap.put("dfs.namenode.shared.edits.dir", new 
MultipleHostTopologyUpdater("JOURNALNODE", ';', false, false, true));
     multiHdfsSiteMap.put("dfs.encryption.key.provider.uri", new 
MultipleHostTopologyUpdater("RANGER_KMS_SERVER", ';', false, false, false));
+    // Explicit initial primary/secondary node assignment in HA
+    shHadoopEnvMap.put("dfs_ha_initial_namenode_active", new 
SingleHostTopologyUpdater("NAMENODE"));
+    shHadoopEnvMap.put("dfs_ha_initial_namenode_standby", new 
SingleHostTopologyUpdater("NAMENODE"));
 
     // SECONDARY_NAMENODE
     hdfsSiteMap.put("dfs.secondary.http.address", new 
SingleHostTopologyUpdater("SECONDARY_NAMENODE"));
@@ -2593,12 +2598,12 @@ public class BlueprintConfigurationProcessor {
       new MultipleHostTopologyUpdater("ZOOKEEPER_SERVER"));
     // Required due to AMBARI-4933.  These no longer seem to be required as the default values in the stack
     // are now correct but are left here in case an existing blueprint still contains an old value.
-    hadoopEnvMap.put("namenode_heapsize", new MPropertyUpdater());
-    hadoopEnvMap.put("namenode_opt_newsize", new MPropertyUpdater());
-    hadoopEnvMap.put("namenode_opt_maxnewsize", new MPropertyUpdater());
-    hadoopEnvMap.put("namenode_opt_permsize", new MPropertyUpdater());
-    hadoopEnvMap.put("namenode_opt_maxpermsize", new MPropertyUpdater());
-    hadoopEnvMap.put("dtnode_heapsize", new MPropertyUpdater());
+    mHadoopEnvMap.put("namenode_heapsize", new MPropertyUpdater());
+    mHadoopEnvMap.put("namenode_opt_newsize", new MPropertyUpdater());
+    mHadoopEnvMap.put("namenode_opt_maxnewsize", new MPropertyUpdater());
+    mHadoopEnvMap.put("namenode_opt_permsize", new MPropertyUpdater());
+    mHadoopEnvMap.put("namenode_opt_maxpermsize", new MPropertyUpdater());
+    mHadoopEnvMap.put("dtnode_heapsize", new MPropertyUpdater());
     mapredEnvMap.put("jtnode_opt_newsize", new MPropertyUpdater());
     mapredEnvMap.put("jtnode_opt_maxnewsize", new MPropertyUpdater());
     mapredEnvMap.put("jtnode_heapsize", new MPropertyUpdater());

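On the export path, the two new SimplePropertyNameExportFilter entries in the first hunk ensure that dfs_ha_initial_namenode_active and dfs_ha_initial_namenode_standby are dropped from hadoop-env when a blueprint is exported from a running cluster, so concrete hostnames do not leak into the exported blueprint. A rough sketch of what such a name/config-type filter amounts to follows; the class and method names are assumptions for illustration, not the real SimplePropertyNameExportFilter implementation.

// Rough sketch of a property-name/config-type export filter: it reports whether a
// given property should be kept when exporting a blueprint. Names are illustrative
// assumptions only.
final class PropertyNameExportFilterSketch {

  private final String propertyName;
  private final String configType;

  PropertyNameExportFilterSketch(String propertyName, String configType) {
    this.propertyName = propertyName;
    this.configType = configType;
  }

  // Returns false (i.e. drop the property from the export) when both names match.
  boolean isPropertyIncluded(String name, String type) {
    return !(propertyName.equals(name) && configType.equals(type));
  }

  public static void main(String[] args) {
    PropertyNameExportFilterSketch activeFilter =
        new PropertyNameExportFilterSketch("dfs_ha_initial_namenode_active", "hadoop-env");
    // The resolved hostname in dfs_ha_initial_namenode_active is filtered out on export.
    System.out.println(activeFilter.isPropertyIncluded("dfs_ha_initial_namenode_active", "hadoop-env")); // false
    System.out.println(activeFilter.isPropertyIncluded("namenode_heapsize", "hadoop-env"));              // true
  }
}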
http://git-wip-us.apache.org/repos/asf/ambari/blob/7fa34c71/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
index b154e23..ec04fd2 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
@@ -28,6 +28,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
+import com.google.common.collect.*;
 import org.apache.ambari.server.state.PropertyDependencyInfo;
 import org.apache.ambari.server.state.ServiceInfo;
 import org.apache.ambari.server.state.ValueAttributesInfo;
@@ -54,12 +55,6 @@ import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
 
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.ImmutableSet;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-
 import static junit.framework.Assert.assertEquals;
 import static junit.framework.Assert.assertFalse;
 import static junit.framework.Assert.assertNotNull;
@@ -992,6 +987,82 @@ public class BlueprintConfigurationProcessorTest {
   }
 
   @Test
+  public void testDoNameNodeHighAvailabilityExportWithHAEnabledPrimaryNamePreferenceNotExported() throws Exception {
+    final String expectedNameService = "mynameservice";
+    final String expectedHostName = "c6401.apache.ambari.org";
+    final String expectedPortNum = "808080";
+    final String expectedNodeOne = "nn1";
+    final String expectedNodeTwo = "nn2";
+    final String expectedHostGroupName = "host_group_1";
+
+    Map<String, Map<String, String>> configProperties = new HashMap<>();
+    Map<String, String> hdfsSiteProperties = new HashMap<>();
+    Map<String, String> coreSiteProperties = new HashMap<>();
+    Map<String, String> hbaseSiteProperties = new HashMap<>();
+    Map<String, String> hadoopEnvProperties = new HashMap<>();
+
+    configProperties.put("hdfs-site", hdfsSiteProperties);
+    configProperties.put("core-site", coreSiteProperties);
+    configProperties.put("hbase-site", hbaseSiteProperties);
+    configProperties.put("hadoop-env", hadoopEnvProperties);
+
+    // setup hdfs config for test
+    hdfsSiteProperties.put("dfs.internal.nameservices", expectedNameService);
+    hdfsSiteProperties.put("dfs.nameservices", expectedNameService);
+    hdfsSiteProperties.put("dfs.ha.namenodes.mynameservice", expectedNodeOne + 
", " + expectedNodeTwo);
+
+    // setup properties that include host information
+    hdfsSiteProperties.put("dfs.namenode.https-address." + expectedNameService 
+ "." + expectedNodeOne, expectedHostName + ":" + expectedPortNum);
+    hdfsSiteProperties.put("dfs.namenode.https-address." + expectedNameService 
+ "." + expectedNodeTwo, expectedHostName + ":" + expectedPortNum);
+    hdfsSiteProperties.put("dfs.namenode.http-address." + expectedNameService 
+ "." + expectedNodeOne, expectedHostName + ":" + expectedPortNum);
+    hdfsSiteProperties.put("dfs.namenode.http-address." + expectedNameService 
+ "." + expectedNodeTwo, expectedHostName + ":" + expectedPortNum);
+    hdfsSiteProperties.put("dfs.namenode.rpc-address." + expectedNameService + 
"." + expectedNodeOne, expectedHostName + ":" + expectedPortNum);
+    hdfsSiteProperties.put("dfs.namenode.rpc-address." + expectedNameService + 
"." + expectedNodeTwo, expectedHostName + ":" + expectedPortNum);
+
+    // setup primary & secondary name node preference
+    hadoopEnvProperties.put("dfs_ha_initial_namenode_active", 
expectedHostName);
+    hadoopEnvProperties.put("dfs_ha_initial_namenode_standby", 
expectedHostName);
+
+    Configuration clusterConfig = new Configuration(configProperties,
+        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+
+    // note: test hostgroups may not accurately reflect the required components for the config properties
+    // which are mapped to them.  Only the hostgroup name is used for hostgroup resolution and the components
+    // are not validated
+    Collection<String> groupComponents = new HashSet<String>();
+    groupComponents.add("NAMENODE");
+    Collection<String> hosts = new ArrayList<String>();
+    hosts.add(expectedHostName);
+    hosts.add("serverTwo");
+    TestHostGroup group = new TestHostGroup(expectedHostGroupName, groupComponents, hosts);
+
+    Collection<TestHostGroup> hostGroups = new HashSet<TestHostGroup>();
+    hostGroups.add(group);
+
+    ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
+    configProcessor.doUpdateForBlueprintExport();
+
+    assertEquals("HTTPS address HA property not properly exported",
+        createExportedAddress(expectedPortNum, expectedHostGroupName), 
hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameService + 
"." + expectedNodeOne));
+    assertEquals("HTTPS address HA property not properly exported",
+        createExportedAddress(expectedPortNum, expectedHostGroupName), 
hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameService + 
"." + expectedNodeTwo));
+
+    assertEquals("HTTPS address HA property not properly exported",
+        createExportedAddress(expectedPortNum, expectedHostGroupName), 
hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameService + "." 
+ expectedNodeOne));
+    assertEquals("HTTPS address HA property not properly exported",
+        createExportedAddress(expectedPortNum, expectedHostGroupName), 
hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameService + "." 
+ expectedNodeTwo));
+
+    assertEquals("HTTPS address HA property not properly exported",
+        createExportedAddress(expectedPortNum, expectedHostGroupName), 
hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameService + "." 
+ expectedNodeOne));
+    assertEquals("HTTPS address HA property not properly exported",
+        createExportedAddress(expectedPortNum, expectedHostGroupName), 
hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameService + "." 
+ expectedNodeTwo));
+
+    assertNull("Initial NameNode HA property exported although should not 
have", hadoopEnvProperties.get("dfs_ha_initial_namenode_active"));
+    assertNull("Initial NameNode HA property exported although should not 
have", hadoopEnvProperties.get("dfs_ha_initial_namenode_standby"));
+  }
+
+  @Test
   public void testDoNameNodeHighAvailabilityExportWithHAEnabledNameServicePropertiesIncluded() throws Exception {
     final String expectedNameService = "mynameservice";
     final String expectedHostName = "c6401.apache.ambari.org";
@@ -5270,6 +5341,56 @@ public class BlueprintConfigurationProcessorTest {
   }
 
   @Test
+  public void testHadoopHaNameNode() throws Exception {
+    // Given
+    final String configType = "hadoop-env";
+    Map<String, Map<String, String>> properties = new HashMap<>();
+
+    // enable HA
+    Map<String, String> hdfsSite = new HashMap<>();
+    hdfsSite.put("dfs.nameservices", "mycluster");
+    hdfsSite.put("dfs.ha.namenodes.mycluster", "nn1,nn2");
+    hdfsSite.put("dfs.namenode.http-address", "%HOSTGROUP::master_1%:50070");
+    hdfsSite.put("dfs.namenode.http-address.mycluster.nn1", 
"%HOSTGROUP::master_1%:50070");
+    hdfsSite.put("dfs.namenode.http-address.mycluster.nn2", 
"%HOSTGROUP::master_2%:50070");
+    hdfsSite.put("dfs.namenode.https-address", "%HOSTGROUP::master_1%:50470");
+    hdfsSite.put("dfs.namenode.https-address.mycluster.nn1", 
"%HOSTGROUP::master_1%:50470");
+    hdfsSite.put("dfs.namenode.https-address.mycluster.nn2", 
"%HOSTGROUP::master_2%:50470");
+    hdfsSite.put("dfs.namenode.rpc-address.mycluster.nn1", 
"%HOSTGROUP::master_1%:8020");
+    hdfsSite.put("dfs.namenode.rpc-address.mycluster.nn2", 
"%HOSTGROUP::master_2%:8020");
+    hdfsSite.put("dfs.namenode.shared.edits.dir", 
"qjournal://%HOSTGROUP::master_1%:8485;%HOSTGROUP::master_2%:8485;%HOSTGROUP::master_2%:8485/mycluster");
+    hdfsSite.put("dfs.ha.automatic-failover.enabled", "true");
+    hdfsSite.put("dfs.ha.fencing.methods", "shell(/bin/true)");
+    properties.put("hdfs-site", hdfsSite);
+
+    Map<String, String> hadoopEnv = new HashMap<>();
+    hadoopEnv.put("dfs_ha_initial_namenode_active", "%HOSTGROUP::master_1%");
+    hadoopEnv.put("dfs_ha_initial_namenode_standby", "%HOSTGROUP::master_2%");
+    properties.put("hadoop-env", hadoopEnv);
+
+    Map<String, Map<String, String>> parentProperties = new HashMap<>();
+    Configuration parentClusterConfig = new Configuration(parentProperties,
+        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+    Configuration clusterConfig = new Configuration(properties,
+        Collections.<String, Map<String, Map<String, String>>>emptyMap(), parentClusterConfig);
+
+    TestHostGroup group1 = new TestHostGroup("master_1", ImmutableSet.of("DATANODE", "NAMENODE"), Collections.singleton("node_1"));
+    TestHostGroup group2 = new TestHostGroup("master_2", ImmutableSet.of("DATANODE", "NAMENODE"), Collections.singleton("node_2"));
+
+    Collection<TestHostGroup> hostGroups = Lists.newArrayList(group1, group2);
+
+    ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
+
+    // When
+    configProcessor.doUpdateForClusterCreate();
+
+    // Then
+    assertEquals("node_1", clusterConfig.getPropertyValue(configType, 
"dfs_ha_initial_namenode_active"));
+    assertEquals("node_2", clusterConfig.getPropertyValue(configType, 
"dfs_ha_initial_namenode_standby"));
+  }
+
+  @Test
   public void testGetRequiredHostGroups___validComponentCountOfZero() throws Exception {
     Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
     Map<String, String> hiveSite = new HashMap<String, String>();
