This is an automated email from the ASF dual-hosted git repository.

rnettleton pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/trunk by this push:
     new 8d1f100  [AMBARI-23397] Blueprint deployments for NameNode Federation Fail (#822)
8d1f100 is described below

commit 8d1f100212de5d49ee848577391a7a1f25e8dadb
Author: rnettleton <rnettle...@hortonworks.com>
AuthorDate: Thu Mar 29 10:53:26 2018 -0400

    [AMBARI-23397] Blueprint deployments for NameNode Federation Fail (#822)
    
    * Initial changes for NameNode Federation support in Blueprints
    
    * Added unit test to verify changes to BlueprintConfigurationProcessor
---
 .../internal/BlueprintConfigurationProcessor.java  |   6 +-
 .../server/topology/ClusterTopologyImpl.java       |   4 +-
 .../BlueprintConfigurationProcessorTest.java       | 133 +++++++++++++++++++++
 3 files changed, 138 insertions(+), 5 deletions(-)
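
The substance of the fix is a relaxed NameNode count check: with HDFS NameNode
Federation a valid topology can carry more than two NAMENODE instances, so the
old "exactly 2" validation rejected legitimate blueprints. Below is a minimal
sketch of the before/after behavior, condensed from the two hunks that follow;
the unchecked IllegalStateException and the hardcoded host list are stand-ins
for the real Ambari exception types and topology lookup:

    import java.util.Arrays;
    import java.util.Collection;

    public class NameNodeCountCheckSketch {

        // Before AMBARI-23397: exactly two NAMENODEs were required, so a
        // federated cluster (three or more NameNodes) failed validation.
        static void validateBefore(Collection<String> nnHosts) {
            if (nnHosts.size() != 2) {
                throw new IllegalStateException("NAMENODE HA requires exactly 2 hosts running NAMENODE but there are: "
                    + nnHosts.size() + " Hosts: " + nnHosts);
            }
        }

        // After AMBARI-23397: at least two NAMENODEs are required, which still
        // rejects a single-NameNode "HA" topology but admits federation.
        static void validateAfter(Collection<String> nnHosts) {
            if (nnHosts.size() < 2) {
                throw new IllegalStateException("NAMENODE HA requires at least 2 hosts running NAMENODE but there are: "
                    + nnHosts.size() + " Hosts: " + nnHosts);
            }
        }

        public static void main(String[] args) {
            Collection<String> threeNameNodes = Arrays.asList(
                "c6401.apache.ambari.org", "server-two", "server-three");
            validateAfter(threeNameNodes);      // passes after the patch
            try {
                validateBefore(threeNameNodes); // the old check throws here
            } catch (IllegalStateException expected) {
                System.out.println("old check rejected federation: " + expected.getMessage());
            }
        }
    }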

diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
index 1bb2879..7f1361a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
@@ -409,8 +409,8 @@ public class BlueprintConfigurationProcessor {
      // if the active/standby namenodes are not specified, assign them automatically
      if (! isNameNodeHAInitialActiveNodeSet(clusterProps) && ! isNameNodeHAInitialStandbyNodeSet(clusterProps)) {
        Collection<String> nnHosts = clusterTopology.getHostAssignmentsForComponent("NAMENODE");
-        if (nnHosts.size() != 2) {
-          throw new ConfigurationTopologyException("NAMENODE HA requires exactly 2 hosts running NAMENODE but there are: " +
+        if (nnHosts.size() < 2) {
+          throw new ConfigurationTopologyException("NAMENODE HA requires at least 2 hosts running NAMENODE but there are: " +
               nnHosts.size() + " Hosts: " + nnHosts);
         }
 
@@ -1467,7 +1467,7 @@ public class BlueprintConfigurationProcessor {
           if (matchingGroupCount == 0 && cardinality.isValidCount(0)) {
             return origValue;
           } else {
-            if (topology.isNameNodeHAEnabled() && isComponentNameNode() && (matchingGroupCount == 2)) {
+            if (topology.isNameNodeHAEnabled() && isComponentNameNode() && (matchingGroupCount >= 2)) {
              // if this is the defaultFS property, it should reflect the nameservice name,
               // rather than a hostname (used in non-HA scenarios)
              if (properties.containsKey("core-site") && properties.get("core-site").get("fs.defaultFS").equals(origValue)) {
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopologyImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopologyImpl.java
index ab6d852..4860a94 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopologyImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopologyImpl.java
@@ -210,8 +210,8 @@ public class ClusterTopologyImpl implements ClusterTopology {
 
     if(isNameNodeHAEnabled()){
        Collection<String> nnHosts = getHostAssignmentsForComponent("NAMENODE");
-        if (nnHosts.size() != 2) {
-            throw new InvalidTopologyException("NAMENODE HA requires exactly 2 hosts running NAMENODE but there are: " +
+        if (nnHosts.size() < 2) {
+            throw new InvalidTopologyException("NAMENODE HA requires at least 2 hosts running NAMENODE but there are: " +
                 nnHosts.size() + " Hosts: " + nnHosts);
         }
        Map<String, String> hadoopEnvConfig = configuration.getFullProperties().get("hadoop-env");
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
index d75a962..0d84550 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
@@ -5327,6 +5327,139 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
   }
 
   @Test
+  public void testDoUpdateForClusterWithNameNodeHAEnabledThreeNameNodes() throws Exception {
+    final String expectedNameService = "mynameservice";
+    final String expectedHostName = "c6401.apache.ambari.org";
+    final String expectedHostNameTwo = "server-two";
+    final String expectedHostNameThree = "server-three";
+    final String expectedPortNum = "808080";
+    final String expectedNodeOne = "nn1";
+    final String expectedNodeTwo = "nn2";
+    final String expectedHostGroupName = "host_group_1";
+
+    Map<String, Map<String, String>> properties = new HashMap<>();
+
+    Map<String, String> hdfsSiteProperties = new HashMap<>();
+    Map<String, String> hbaseSiteProperties = new HashMap<>();
+    Map<String, String> hadoopEnvProperties = new HashMap<>();
+    Map<String, String> coreSiteProperties = new HashMap<>();
+    Map<String, String> accumuloSiteProperties = new HashMap<>();
+
+    properties.put("hdfs-site", hdfsSiteProperties);
+    properties.put("hadoop-env", hadoopEnvProperties);
+    properties.put("core-site", coreSiteProperties);
+    properties.put("hbase-site", hbaseSiteProperties);
+    properties.put("accumulo-site", accumuloSiteProperties);
+
+    // setup hdfs HA config for test
+    hdfsSiteProperties.put("dfs.nameservices", expectedNameService);
+    hdfsSiteProperties.put("dfs.ha.namenodes.mynameservice", expectedNodeOne + 
", " + expectedNodeTwo);
+
+    // setup properties that include exported host group information
+    hdfsSiteProperties.put("dfs.namenode.https-address." + expectedNameService 
+ "." + expectedNodeOne, createExportedAddress(expectedPortNum, 
expectedHostGroupName));
+    hdfsSiteProperties.put("dfs.namenode.https-address." + expectedNameService 
+ "." + expectedNodeTwo, createExportedAddress(expectedPortNum, 
expectedHostGroupName));
+    hdfsSiteProperties.put("dfs.namenode.http-address." + expectedNameService 
+ "." + expectedNodeOne, createExportedAddress(expectedPortNum, 
expectedHostGroupName));
+    hdfsSiteProperties.put("dfs.namenode.http-address." + expectedNameService 
+ "." + expectedNodeTwo, createExportedAddress(expectedPortNum, 
expectedHostGroupName));
+    hdfsSiteProperties.put("dfs.namenode.rpc-address." + expectedNameService + 
"." + expectedNodeOne, createExportedAddress(expectedPortNum, 
expectedHostGroupName));
+    hdfsSiteProperties.put("dfs.namenode.rpc-address." + expectedNameService + 
"." + expectedNodeTwo, createExportedAddress(expectedPortNum, 
expectedHostGroupName));
+
+    // add properties that require the SECONDARY_NAMENODE, which
+    // is not included in this test
+    hdfsSiteProperties.put("dfs.secondary.http.address", "localhost:8080");
+    hdfsSiteProperties.put("dfs.namenode.secondary.http-address", 
"localhost:8080");
+
+
+    // add properties that are used in non-HA HDFS NameNode settings
+    // to verify that these are eventually removed by the filter
+    hdfsSiteProperties.put("dfs.namenode.http-address", "localhost:8080");
+    hdfsSiteProperties.put("dfs.namenode.https-address", "localhost:8081");
+    hdfsSiteProperties.put("dfs.namenode.rpc-address", "localhost:8082");
+
+    // configure the defaultFS to use the nameservice URL
+    coreSiteProperties.put("fs.defaultFS", "hdfs://" + expectedNameService);
+
+    // configure the hbase rootdir to use the nameservice URL
+    hbaseSiteProperties.put("hbase.rootdir", "hdfs://" + expectedNameService + "/hbase/test/root/dir");
+
+    // configure the accumulo instance volumes to use the nameservice URL
+    accumuloSiteProperties.put("instance.volumes", "hdfs://" + expectedNameService + "/accumulo/test/instance/volumes");
+
+    Configuration clusterConfig = new Configuration(properties, Collections.emptyMap());
+
+    Collection<String> hgComponents = new HashSet<>();
+    hgComponents.add("NAMENODE");
+    TestHostGroup group1 = new TestHostGroup(expectedHostGroupName, hgComponents, Collections.singleton(expectedHostName));
+
+    Collection<String> hgComponents2 = new HashSet<>();
+    hgComponents2.add("NAMENODE");
+    TestHostGroup group2 = new TestHostGroup("host-group-2", hgComponents2, Collections.singleton(expectedHostNameTwo));
+
+    // add third hostgroup with NAMENODE, to simulate HDFS NameNode Federation
+    TestHostGroup group3 = new TestHostGroup("host-group-3", Collections.singleton("NAMENODE"), Collections.singleton(expectedHostNameThree));
+
+    Collection<TestHostGroup> hostGroups = new ArrayList<>();
+    hostGroups.add(group1);
+    hostGroups.add(group2);
+    hostGroups.add(group3);
+
+    expect(stack.getCardinality("NAMENODE")).andReturn(new 
Cardinality("1-2")).anyTimes();
+    expect(stack.getCardinality("SECONDARY_NAMENODE")).andReturn(new 
Cardinality("1")).anyTimes();
+
+    ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
+
+    Set<String> updatedConfigTypes =
+      updater.doUpdateForClusterCreate();
+
+    // verify that dfs.internal.nameservices was added
+    assertEquals("dfs.internal.nameservices wasn't added", 
expectedNameService, hdfsSiteProperties.get("dfs.internal.nameservices"));
+
+    // verify that the expected hostname was substituted for the host group name in the config
+    assertEquals("HTTPS address HA property not properly exported",
+      expectedHostName + ":" + expectedPortNum, 
hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameService + 
"." + expectedNodeOne));
+    assertEquals("HTTPS address HA property not properly exported",
+      expectedHostName + ":" + expectedPortNum, 
hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameService + 
"." + expectedNodeTwo));
+
+    assertEquals("HTTPS address HA property not properly exported",
+      expectedHostName + ":" + expectedPortNum, 
hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameService + "." 
+ expectedNodeOne));
+    assertEquals("HTTPS address HA property not properly exported",
+      expectedHostName + ":" + expectedPortNum, 
hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameService + "." 
+ expectedNodeTwo));
+
+    assertEquals("HTTPS address HA property not properly exported",
+      expectedHostName + ":" + expectedPortNum, 
hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameService + "." 
+ expectedNodeOne));
+    assertEquals("HTTPS address HA property not properly exported",
+      expectedHostName + ":" + expectedPortNum, 
hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameService + "." 
+ expectedNodeTwo));
+
+    assertEquals("fs.defaultFS should not be modified by cluster update when 
NameNode HA is enabled.",
+      "hdfs://" + expectedNameService, coreSiteProperties.get("fs.defaultFS"));
+
+    assertEquals("hbase.rootdir should not be modified by cluster update when 
NameNode HA is enabled.",
+      "hdfs://" + expectedNameService + "/hbase/test/root/dir", 
hbaseSiteProperties.get("hbase.rootdir"));
+
+    assertEquals("instance.volumes should not be modified by cluster update 
when NameNode HA is enabled.",
+      "hdfs://" + expectedNameService + "/accumulo/test/instance/volumes", 
accumuloSiteProperties.get("instance.volumes"));
+
+    // verify that the non-HA properties are filtered out in HA mode
+    assertFalse("dfs.namenode.http-address should have been filtered out of 
this HA configuration",
+      hdfsSiteProperties.containsKey("dfs.namenode.http-address"));
+    assertFalse("dfs.namenode.https-address should have been filtered out of 
this HA configuration",
+      hdfsSiteProperties.containsKey("dfs.namenode.https-address"));
+    assertFalse("dfs.namenode.rpc-address should have been filtered out of 
this HA configuration",
+      hdfsSiteProperties.containsKey("dfs.namenode.rpc-address"));
+
+
+    // verify that the correct configuration types were listed as updated in the returned set
+    assertEquals("Incorrect number of updated config types returned, set = " + updatedConfigTypes,
+      3, updatedConfigTypes.size());
+    assertTrue("Expected config type not found in updated set",
+      updatedConfigTypes.contains("cluster-env"));
+    assertTrue("Expected config type not found in updated set",
+      updatedConfigTypes.contains("hdfs-site"));
+    assertTrue("Expected config type not found in updated set",
+      updatedConfigTypes.contains("hadoop-env"));
+  }
+
+  @Test
   public void testDoUpdateForClusterWithNameNodeHANotEnabled() throws Exception {
     final String expectedHostName = "c6401.apache.ambari.org";
     final String expectedHostNameTwo = "serverTwo";

-- 
To stop receiving notification emails like this one, please contact
rnettle...@apache.org.
