This is an automated email from the ASF dual-hosted git repository.

nitiraj pushed a commit to branch branch-2.7
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/branch-2.7 by this push:
     new 604b682  AMBARI-24535 : File View not accessible in Ambari 2.7 after enabling 3 namenodes in HDP 3.0 (nitirajrathore) (#2351)
604b682 is described below

commit 604b682442ae58d9f5f1ed0196ff10a5c7bc4ad0
Author: nitirajrathore <nitiraj.rath...@gmail.com>
AuthorDate: Wed Sep 26 10:24:32 2018 +0530

    AMBARI-24535 : File View not accessible in Ambari 2.7 after enabling 3 namenodes in HDP 3.0 (nitirajrathore) (#2351)
    
    * AMBARI-24535 : File View not accessible in Ambari 2.7 after enabling 3 namenodes in HDP 3.0 (nitirajrathore)
    
    * AMBARI-24535 : addressed review comments and handled changes for Workflow Manager. File View not accessible in Ambari 2.7 after enabling 3 namenodes in HDP 3.0 (nitirajrathore)
---
 contrib/views/files/src/main/resources/view.xml    |  45 +---
 .../view/utils/hdfs/ConfigurationBuilder.java      |  91 ++++++---
 .../view/utils/hdfs/ConfigurationBuilderTest.java  |  48 ++++-
 .../views/wfmanager/src/main/resources/view.xml    | 227 ++++++++++-----------
 4 files changed, 228 insertions(+), 183 deletions(-)
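
In short, the patch replaces the fixed nn1/nn2 view parameters with comma-separated
".list" parameters, so any number of NameNodes can be configured. A condensed sketch
of the new resolution logic (mirroring copyHAProperties in the diff below; names
shortened for illustration):

    // For each namenode ID, prefer the cluster's hdfs-site value; if none were
    // found, fall back to the comma-separated view instance property.
    List<String> rpcAddresses = new ArrayList<>();
    for (String namenode : namenodeIDs.split(",")) {
      String addr = getProperty("hdfs-site",
          String.format("dfs.namenode.rpc-address.%s.%s", nameservice, namenode));
      if (!Strings.isNullOrEmpty(addr)) {
        rpcAddresses.add(addr);
      }
    }
    if (rpcAddresses.isEmpty()) {
      String fromView = getViewProperty("webhdfs.ha.namenode.rpc-address.list");
      if (!Strings.isNullOrEmpty(fromView)) {
        rpcAddresses.addAll(Arrays.asList(fromView.split(",")));
      }
    }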

diff --git a/contrib/views/files/src/main/resources/view.xml b/contrib/views/files/src/main/resources/view.xml
index a4513a6..09a1839 100644
--- a/contrib/views/files/src/main/resources/view.xml
+++ b/contrib/views/files/src/main/resources/view.xml
@@ -48,50 +48,23 @@
         <cluster-config>fake</cluster-config>
     </parameter>
     <parameter>
-        <name>webhdfs.ha.namenode.rpc-address.nn1</name>
-        <description>RPC address for first name node.
-            Value of hdfs-site/dfs.namenode.rpc-address.[nameservice].[namenode1] property</description>
-        <label>First NameNode RPC Address</label>
+        <name>webhdfs.ha.namenode.rpc-address.list</name>
+        <description>Comma-separated RPC addresses for the NameNodes.</description>
+        <label>Comma-separated NameNode RPC Addresses</label>
         <required>false</required>
         <cluster-config>fake</cluster-config>
     </parameter>
     <parameter>
-        <name>webhdfs.ha.namenode.rpc-address.nn2</name>
-        <description>RPC address for second name node.
-            Value of hdfs-site/dfs.namenode.rpc-address.[nameservice].[namenode2] property</description>
-        <label>Second NameNode RPC Address</label>
+        <name>webhdfs.ha.namenode.http-address.list</name>
+        <description>Comma-separated WebHDFS addresses for the NameNodes.</description>
+        <label>Comma-separated NameNode HTTP (WebHDFS) Addresses</label>
         <required>false</required>
         <cluster-config>fake</cluster-config>
     </parameter>
     <parameter>
-        <name>webhdfs.ha.namenode.http-address.nn1</name>
-        <description>WebHDFS address for first name node.
-            Value of hdfs-site/dfs.namenode.http-address.[nameservice].[namenode1] property</description>
-        <label>First NameNode HTTP (WebHDFS) Address</label>
-        <required>false</required>
-        <cluster-config>fake</cluster-config>
-    </parameter>
-    <parameter>
-        <name>webhdfs.ha.namenode.http-address.nn2</name>
-        <description>WebHDFS address for second name node.
-            Value of hdfs-site/dfs.namenode.http-address.[nameservice].[namenode2] property</description>
-        <label>Second NameNode HTTP (WebHDFS) Address</label>
-        <required>false</required>
-        <cluster-config>fake</cluster-config>
-    </parameter>
-    <parameter>
-        <name>webhdfs.ha.namenode.https-address.nn1</name>
-        <description>WebHDFS Https address for first name node.
-            Value of hdfs-site/dfs.namenode.https-address.[nameservice].[namenode1] property</description>
-        <label>First NameNode HTTPS (WebHDFS) Address</label>
-        <required>false</required>
-        <cluster-config>fake</cluster-config>
-    </parameter>
-    <parameter>
-        <name>webhdfs.ha.namenode.https-address.nn2</name>
-        <description>WebHDFS Https address for second name node.
-            Value of hdfs-site/dfs.namenode.https-address.[nameservice].[namenode2] property</description>
-        <label>Second NameNode HTTPS (WebHDFS) Address</label>
+        <name>webhdfs.ha.namenode.https-address.list</name>
+        <description>Comma-separated WebHDFS HTTPS addresses for the NameNodes.</description>
+        <label>Comma-separated NameNode HTTPS (WebHDFS) Addresses</label>
         <required>false</required>
         <cluster-config>fake</cluster-config>
     </parameter>
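
With the list parameters above, a three-NameNode HA setup can be expressed directly in
the view instance configuration. A hypothetical example of the property map the view
would receive (the host names and ports are illustrative, not from the patch):

    Map<String, String> instanceProperties = new HashMap<>();
    instanceProperties.put("webhdfs.nameservices", "mycluster");
    instanceProperties.put("webhdfs.ha.namenodes.list", "nn1,nn2,nn3");
    // One address per namenode ID, in the same order as the namenodes list.
    instanceProperties.put("webhdfs.ha.namenode.rpc-address.list",
        "nn-host1:8020,nn-host2:8020,nn-host3:8020");
    instanceProperties.put("webhdfs.ha.namenode.http-address.list",
        "nn-host1:50070,nn-host2:50070,nn-host3:50070");
    instanceProperties.put("webhdfs.ha.namenode.https-address.list",
        "nn-host1:50470,nn-host2:50470,nn-host3:50470");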
diff --git a/contrib/views/utils/src/main/java/org/apache/ambari/view/utils/hdfs/ConfigurationBuilder.java b/contrib/views/utils/src/main/java/org/apache/ambari/view/utils/hdfs/ConfigurationBuilder.java
index df0fd96..fc37b77 100644
--- a/contrib/views/utils/src/main/java/org/apache/ambari/view/utils/hdfs/ConfigurationBuilder.java
+++ b/contrib/views/utils/src/main/java/org/apache/ambari/view/utils/hdfs/ConfigurationBuilder.java
@@ -18,6 +18,8 @@
 
 package org.apache.ambari.view.utils.hdfs;
 
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Strings;
 import org.apache.ambari.view.ViewContext;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;
@@ -28,6 +30,9 @@ import java.io.IOException;
 import java.io.StringWriter;
 import java.net.URI;
 import java.net.URISyntaxException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
 import java.util.Map;
 
 /**
@@ -48,16 +53,13 @@ public class ConfigurationBuilder {
   public static final String HA_NAMENODES_INSTANCE_PROPERTY = "webhdfs.ha.namenodes.list";
 
   public static final String HA_NAMENODES_CLUSTER_PROPERTY = "dfs.ha.namenodes.%s";
-  public static final String NAMENODE_RPC_NN1_INSTANCE_PROPERTY = "webhdfs.ha.namenode.rpc-address.nn1";
-  public static final String NAMENODE_RPC_NN2_INSTANCE_PROPERTY = "webhdfs.ha.namenode.rpc-address.nn2";
+  public static final String NAMENODE_RPC_NN_INSTANCE_PROPERTY = "webhdfs.ha.namenode.rpc-address.list";
   public static final String NAMENODE_RPC_NN_CLUSTER_PROPERTY = "dfs.namenode.rpc-address.%s.%s";
 
-  public static final String NAMENODE_HTTP_NN1_INSTANCE_PROPERTY = "webhdfs.ha.namenode.http-address.nn1";
-  public static final String NAMENODE_HTTP_NN2_INSTANCE_PROPERTY = "webhdfs.ha.namenode.http-address.nn2";
+  public static final String NAMENODE_HTTP_NN_INSTANCE_PROPERTY = "webhdfs.ha.namenode.http-address.list";
   public static final String NAMENODE_HTTP_NN_CLUSTER_PROPERTY = "dfs.namenode.http-address.%s.%s";
 
-  public static final String NAMENODE_HTTPS_NN1_INSTANCE_PROPERTY = "webhdfs.ha.namenode.https-address.nn1";
-  public static final String NAMENODE_HTTPS_NN2_INSTANCE_PROPERTY = "webhdfs.ha.namenode.https-address.nn2";
+  public static final String NAMENODE_HTTPS_NN_INSTANCE_PROPERTY = "webhdfs.ha.namenode.https-address.list";
   public static final String NAMENODE_HTTPS_NN_CLUSTER_PROPERTY = "dfs.namenode.https-address.%s.%s";
 
   public static final String FAILOVER_PROXY_PROVIDER_INSTANCE_PROPERTY = "webhdfs.client.failover.proxy.provider";
@@ -74,7 +76,7 @@ public class ConfigurationBuilder {
   public static final String DFS_NAMENODE_HTTPS_ADDERSS = "dfs.namenode.https-address";
 
 
-  private Configuration conf = new Configuration();
+  protected Configuration conf = new Configuration();
   private ViewContext context;
   private AuthConfigurationBuilder authParamsBuilder;
   private Map<String, String> authParams;
@@ -137,7 +139,7 @@ public class ConfigurationBuilder {
 
   protected String getEncryptionKeyProviderUri() {
     //If KMS is configured, this value will not be empty
-    String encryptionKeyProviderUri = context.getCluster().getConfigurationValue("hdfs-site", "dfs.encryption.key.provider.uri");
+    String encryptionKeyProviderUri = getProperty("hdfs-site", "dfs.encryption.key.provider.uri");
     return encryptionKeyProviderUri;
   }
 
@@ -182,13 +184,17 @@ public class ConfigurationBuilder {
     String value;
 
     if (context.getCluster() != null) {
-      value = context.getCluster().getConfigurationValue(type, key);
+      value = getProperty(type, key);
     } else {
-      value = context.getProperties().get(instanceProperty);
+      value = getViewProperty(instanceProperty);
     }
     return value;
   }
 
+  private String getViewProperty(String instanceProperty) {
+    return context.getProperties().get(instanceProperty);
+  }
+
   private String getProperty(String type, String key) {
     if (context.getCluster() != null) {
       return context.getCluster().getConfigurationValue(type, key);
@@ -234,7 +240,8 @@ public class ConfigurationBuilder {
     }
   }
 
-  private void copyHAProperties(String defaultFS) throws URISyntaxException, HdfsApiException {
+  @VisibleForTesting
+  void copyHAProperties(String defaultFS) throws URISyntaxException, HdfsApiException {
     URI uri = new URI(defaultFS);
     String nameservice = uri.getHost();
 
@@ -243,29 +250,55 @@ public class ConfigurationBuilder {
       HA_NAMENODES_INSTANCE_PROPERTY);
 
     String[] namenodes = namenodeIDs.split(",");
-    if (namenodes.length != 2) {
-      throw new HdfsApiException("HDFS080 " + HA_NAMENODES_INSTANCE_PROPERTY + " namenodes count is not exactly 2");
+    // Get the property values from the cluster.
+    // If not found, fall back to the view instance properties.
+
+    List<String> rpcAddresses = new ArrayList<>(namenodes.length);
+    List<String> httpAddresses = new ArrayList<>(namenodes.length);
+    List<String> httpsAddresses = new ArrayList<>(namenodes.length);
+    for (String namenode : namenodes) {
+
+      String rpcAddress = getProperty(HDFS_SITE, String.format(NAMENODE_RPC_NN_CLUSTER_PROPERTY, nameservice, namenode));
+      if (!Strings.isNullOrEmpty(rpcAddress)) {
+        rpcAddresses.add(rpcAddress);
+      }
+
+      String httpAddress = getProperty(HDFS_SITE, String.format(NAMENODE_HTTP_NN_CLUSTER_PROPERTY, nameservice, namenode));
+      if (!Strings.isNullOrEmpty(httpAddress)) {
+        httpAddresses.add(httpAddress);
+      }
+
+      String httpsAddress = getProperty(HDFS_SITE, String.format(NAMENODE_HTTPS_NN_CLUSTER_PROPERTY, nameservice, namenode));
+      if (!Strings.isNullOrEmpty(httpsAddress)) {
+        httpsAddresses.add(httpsAddress);
+      }
+    }
+
+    addAddresses(rpcAddresses, NAMENODE_RPC_NN_INSTANCE_PROPERTY);
+    addAddresses(httpAddresses, NAMENODE_HTTP_NN_INSTANCE_PROPERTY);
+    addAddresses(httpsAddresses, NAMENODE_HTTPS_NN_INSTANCE_PROPERTY);
+
+    for (int i = 0; i < namenodes.length; i++) {
+      conf.set(String.format(NAMENODE_RPC_NN_CLUSTER_PROPERTY, nameservice, namenodes[i]), rpcAddresses.get(i));
+      conf.set(String.format(NAMENODE_HTTP_NN_CLUSTER_PROPERTY, nameservice, namenodes[i]), httpAddresses.get(i));
+      conf.set(String.format(NAMENODE_HTTPS_NN_CLUSTER_PROPERTY, nameservice, namenodes[i]), httpsAddresses.get(i));
     }
-    //NN1
-    copyClusterProperty(String.format(NAMENODE_RPC_NN_CLUSTER_PROPERTY, nameservice, namenodes[0]),
-      NAMENODE_RPC_NN1_INSTANCE_PROPERTY);
-    copyClusterProperty(String.format(NAMENODE_HTTP_NN_CLUSTER_PROPERTY, nameservice, namenodes[0]),
-      NAMENODE_HTTP_NN1_INSTANCE_PROPERTY);
-    copyClusterProperty(String.format(NAMENODE_HTTPS_NN_CLUSTER_PROPERTY, nameservice, namenodes[0]),
-      NAMENODE_HTTPS_NN1_INSTANCE_PROPERTY);
-
-    //NN2
-    copyClusterProperty(String.format(NAMENODE_RPC_NN_CLUSTER_PROPERTY, nameservice, namenodes[1]),
-      NAMENODE_RPC_NN2_INSTANCE_PROPERTY);
-    copyClusterProperty(String.format(NAMENODE_HTTP_NN_CLUSTER_PROPERTY, nameservice, namenodes[1]),
-      NAMENODE_HTTP_NN2_INSTANCE_PROPERTY);
-    copyClusterProperty(String.format(NAMENODE_HTTPS_NN_CLUSTER_PROPERTY, nameservice, namenodes[1]),
-      NAMENODE_HTTPS_NN2_INSTANCE_PROPERTY);
 
     copyClusterProperty(String.format(FAILOVER_PROXY_PROVIDER_CLUSTER_PROPERTY, nameservice),
       FAILOVER_PROXY_PROVIDER_INSTANCE_PROPERTY);
   }
 
+  private void addAddresses(List<String> addressList, String propertyName) {
+    if (addressList.isEmpty()) {
+      // Fall back to the property from the view instance configs.
+      String addressString = getViewProperty(propertyName);
+      LOG.debug("value of {} in view is: {}", propertyName, addressString);
+      if (!Strings.isNullOrEmpty(addressString)) {
+        addressList.addAll(Arrays.asList(addressString.split(",")));
+      }
+    }
+  }
+
   private String copyClusterProperty(String propertyName, String instancePropertyName) {
     String value = getProperty(HDFS_SITE, propertyName, instancePropertyName);
     if (!StringUtils.isEmpty(value)) {
@@ -327,7 +360,7 @@ public class ConfigurationBuilder {
     parseProperties();
     setAuthParams(buildAuthenticationConfig());
 
-    String umask = context.getProperties().get(UMASK_INSTANCE_PROPERTY);
+    String umask = getViewProperty(UMASK_INSTANCE_PROPERTY);
     if (umask != null && !umask.isEmpty()) conf.set(UMASK_CLUSTER_PROPERTY, umask);
 
     if(null != this.customProperties){
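
Note that the final conf.set loop in copyHAProperties indexes each address list by
namenode position, so the comma-separated values must line up one-to-one, and in the
same order, with webhdfs.ha.namenodes.list. A defensive guard along these lines
(purely illustrative, not part of the patch) would make a mismatch fail fast instead
of surfacing as an IndexOutOfBoundsException:

    // Hypothetical guard, not in the patch: fail fast when an address list does
    // not match the configured namenode IDs one-to-one.
    if (rpcAddresses.size() != namenodes.length) {
      throw new HdfsApiException("HDFS080 expected " + namenodes.length
          + " RPC addresses but found " + rpcAddresses.size());
    }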
diff --git a/contrib/views/utils/src/test/java/org/apache/ambari/view/utils/hdfs/ConfigurationBuilderTest.java b/contrib/views/utils/src/test/java/org/apache/ambari/view/utils/hdfs/ConfigurationBuilderTest.java
index 12c613a..1f3d774 100644
--- a/contrib/views/utils/src/test/java/org/apache/ambari/view/utils/hdfs/ConfigurationBuilderTest.java
+++ b/contrib/views/utils/src/test/java/org/apache/ambari/view/utils/hdfs/ConfigurationBuilderTest.java
@@ -18,17 +18,31 @@
 
 package org.apache.ambari.view.utils.hdfs;
 
+import com.google.common.base.Joiner;
 import org.apache.ambari.view.ViewContext;
 import org.apache.ambari.view.cluster.Cluster;
 import org.easymock.EasyMockSupport;
+import org.junit.Assert;
 import org.junit.Test;
 
+import java.util.Arrays;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.Properties;
 
+import static org.apache.ambari.view.utils.hdfs.ConfigurationBuilder.HA_NAMENODES_INSTANCE_PROPERTY;
+import static org.apache.ambari.view.utils.hdfs.ConfigurationBuilder.NAMENODE_HTTPS_NN_CLUSTER_PROPERTY;
+import static org.apache.ambari.view.utils.hdfs.ConfigurationBuilder.NAMENODE_HTTPS_NN_INSTANCE_PROPERTY;
+import static org.apache.ambari.view.utils.hdfs.ConfigurationBuilder.NAMENODE_HTTP_NN_CLUSTER_PROPERTY;
+import static org.apache.ambari.view.utils.hdfs.ConfigurationBuilder.NAMENODE_HTTP_NN_INSTANCE_PROPERTY;
+import static org.apache.ambari.view.utils.hdfs.ConfigurationBuilder.NAMENODE_RPC_NN_CLUSTER_PROPERTY;
+import static org.apache.ambari.view.utils.hdfs.ConfigurationBuilder.NAMENODE_RPC_NN_INSTANCE_PROPERTY;
+import static org.apache.ambari.view.utils.hdfs.ConfigurationBuilder.NAMESERVICES_INSTANCE_PROPERTY;
 import static org.easymock.EasyMock.expect;
 import static org.easymock.EasyMock.replay;
 import static org.junit.Assert.*;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
 
 public class ConfigurationBuilderTest extends EasyMockSupport {
   @Test
@@ -66,7 +80,7 @@ public class ConfigurationBuilderTest extends EasyMockSupport {
     replay(cluster);
 
     ViewContext viewContext = createNiceMock(ViewContext.class);
-    expect(viewContext.getCluster()).andReturn(cluster);
+    expect(viewContext.getCluster()).andReturn(cluster).anyTimes();
     Map<String, String> instanceProperties = new HashMap<>();
     expect(viewContext.getProperties()).andReturn(instanceProperties).anyTimes();
     replay(viewContext);
@@ -76,4 +90,36 @@ public class ConfigurationBuilderTest extends EasyMockSupport {
 
     assertEquals(encryptionKeyProviderUri, keyProvider);
   }
+
+  @Test
+  public void testCopyHAProperties() throws Exception {
+    Map<String, String> properties = new HashMap<>();
+    String[] nnrpc = new String[]{"nn1rpc", "nn2rpc", "nn3rpc"};
+    String[] nnhttp = new String[]{"nn1http", "nn2http", "nn3http"};
+    String[] nnhttps = new String[]{"nn1https", "nn2https", "nn3https"};
+
+    String nameservice = "mycluster";
+    String nameNodesString = "nn1,nn2,nn3";
+    String[] namenodes = nameNodesString.split(",");
+    properties.put(NAMESERVICES_INSTANCE_PROPERTY, nameservice);
+    properties.put(HA_NAMENODES_INSTANCE_PROPERTY, nameNodesString);
+    properties.put(NAMENODE_RPC_NN_INSTANCE_PROPERTY, Joiner.on(",").join(Arrays.asList(nnrpc)));
+    properties.put(NAMENODE_HTTP_NN_INSTANCE_PROPERTY, Joiner.on(",").join(Arrays.asList(nnhttp)));
+    properties.put(NAMENODE_HTTPS_NN_INSTANCE_PROPERTY, Joiner.on(",").join(Arrays.asList(nnhttps)));
+
+    String defaultFS = "webhdfs://" + nameservice;
+    Cluster cluster = mock(Cluster.class);
+    ViewContext viewContext = mock(ViewContext.class);
+    when(viewContext.getCluster()).thenReturn(null);
+    when(viewContext.getProperties()).thenReturn(properties);
+
+    ConfigurationBuilder configurationBuilder = new ConfigurationBuilder(viewContext);
+    configurationBuilder.copyHAProperties(defaultFS);
+
+    for (int i = 0; i < nnhttp.length; i++) {
+      Assert.assertEquals("name node rpc address not correct.", nnrpc[i], configurationBuilder.conf.get(String.format(NAMENODE_RPC_NN_CLUSTER_PROPERTY, nameservice, namenodes[i])));
+      Assert.assertEquals("name node http address not correct.", nnhttp[i], configurationBuilder.conf.get(String.format(NAMENODE_HTTP_NN_CLUSTER_PROPERTY, nameservice, namenodes[i])));
+      Assert.assertEquals("name node https address not correct.", nnhttps[i], configurationBuilder.conf.get(String.format(NAMENODE_HTTPS_NN_CLUSTER_PROPERTY, nameservice, namenodes[i])));
+    }
+  }
 }
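
The new test drives copyHAProperties purely from view instance properties
(getCluster() returns null). A companion check, sketched under the same mocking
assumptions (hypothetical, not part of the patch), could confirm that cluster-side
hdfs-site values take precedence when a cluster is attached:

    Cluster cluster = mock(Cluster.class);
    when(cluster.getConfigurationValue("hdfs-site", "dfs.ha.namenodes.mycluster")).thenReturn("nn1");
    when(cluster.getConfigurationValue("hdfs-site", "dfs.namenode.rpc-address.mycluster.nn1")).thenReturn("host1:8020");
    when(cluster.getConfigurationValue("hdfs-site", "dfs.namenode.http-address.mycluster.nn1")).thenReturn("host1:50070");
    when(cluster.getConfigurationValue("hdfs-site", "dfs.namenode.https-address.mycluster.nn1")).thenReturn("host1:50470");

    ViewContext viewContext = mock(ViewContext.class);
    when(viewContext.getCluster()).thenReturn(cluster);

    ConfigurationBuilder builder = new ConfigurationBuilder(viewContext);
    builder.copyHAProperties("webhdfs://mycluster");
    Assert.assertEquals("host1:8020", builder.conf.get("dfs.namenode.rpc-address.mycluster.nn1"));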
diff --git a/contrib/views/wfmanager/src/main/resources/view.xml b/contrib/views/wfmanager/src/main/resources/view.xml
index 85cf3e5..3ae3581 100644
--- a/contrib/views/wfmanager/src/main/resources/view.xml
+++ b/contrib/views/wfmanager/src/main/resources/view.xml
@@ -24,7 +24,7 @@
     <name>proxy</name>
    <service-class>org.apache.oozie.ambari.view.OozieProxyImpersonator</service-class>
   </resource>
-       
+
   <parameter>
     <name>oozie.service.uri</name>
     <description>Oozie service URI for the Oozie API.</description>
@@ -34,131 +34,124 @@
   </parameter>
 
 
-      <parameter>
-        <name>webhdfs.url</name>
-        <description>Enter the WebHDFS FileSystem URI. Typically this is the dfs.namenode.http-address
-            property in the hdfs-site.xml configuration. URL must be accessible from Ambari Server.</description>
-        <label>WebHDFS FileSystem URI</label>
-        <required>true</required>
-        <cluster-config>core-site/fs.defaultFS</cluster-config>
-    </parameter>
-    <parameter>
-        <name>webhdfs.nameservices</name>
-        <description>Comma-separated list of nameservices. Value of hdfs-site/dfs.nameservices property</description>
-        <label>Logical name of the NameNode cluster</label>
-        <required>false</required>
-        <cluster-config>hdfs-site/dfs.nameservices</cluster-config>
-    </parameter>
-    <parameter>
-        <name>webhdfs.ha.namenodes.list</name>
-        <description>Comma-separated list of namenodes for a given nameservice.
-            Value of hdfs-site/dfs.ha.namenodes.[nameservice] property</description>
-        <label>List of NameNodes</label>
-        <required>false</required>
-        <cluster-config>fake</cluster-config>
-    </parameter>
-    <parameter>
-        <name>webhdfs.ha.namenode.rpc-address.nn1</name>
-        <description>RPC address for first name node.
-            Value of hdfs-site/dfs.namenode.rpc-address.[nameservice].[namenode1] property</description>
-        <label>First NameNode RPC Address</label>
-        <required>false</required>
-        <cluster-config>fake</cluster-config>
-    </parameter>
-    <parameter>
-        <name>webhdfs.ha.namenode.rpc-address.nn2</name>
-        <description>RPC address for second name node.
-            Value of hdfs-site/dfs.namenode.rpc-address.[nameservice].[namenode2] property</description>
-        <label>Second NameNode RPC Address</label>
-        <required>false</required>
-        <cluster-config>fake</cluster-config>
-    </parameter>
-    <parameter>
-        <name>webhdfs.ha.namenode.http-address.nn1</name>
-        <description>WebHDFS address for first name node.
-            Value of hdfs-site/dfs.namenode.http-address.[nameservice].[namenode1] property</description>
-        <label>First NameNode HTTP (WebHDFS) Address</label>
-        <required>false</required>
-        <cluster-config>fake</cluster-config>
-    </parameter>
-    <parameter>
-        <name>webhdfs.ha.namenode.http-address.nn2</name>
-        <description>WebHDFS address for second name node.
-            Value of hdfs-site/dfs.namenode.http-address.[nameservice].[namenode2] property</description>
-        <label>Second NameNode HTTP (WebHDFS) Address</label>
-        <required>false</required>
-        <cluster-config>fake</cluster-config>
-    </parameter>
-    <parameter>
-        <name>webhdfs.client.failover.proxy.provider</name>
-        <description>The Java class that HDFS clients use to contact the Active NameNode
-            Value of hdfs-site/dfs.client.failover.proxy.provider.[nameservice] property</description>
-        <label>Failover Proxy Provider</label>
-        <required>false</required>
-        <cluster-config>fake</cluster-config>
-    </parameter>
-    <parameter>
-        <name>hdfs.auth_to_local</name>
-        <description>Auth to Local Configuration</description>
-        <label>Auth To Local</label>
-        <required>false</required>
-        <cluster-config>core-site/hadoop.security.auth_to_local</cluster-config>
-    </parameter>
-    <parameter>
-        <name>webhdfs.username</name>
-        <description>doAs for proxy user for HDFS. By default, uses the currently logged-in Ambari user.</description>
-        <label>WebHDFS Username</label>
-        <default-value>${username}</default-value>
-        <required>false</required>
-    </parameter>
-    <parameter>
-        <name>webhdfs.auth</name>
-        <description>Semicolon-separated authentication configs.</description>
-        <placeholder>auth=SIMPLE</placeholder>
-        <label>WebHDFS Authorization</label>
-        <required>false</required>
-    </parameter>
-       <parameter>
-        <name>hadoop.security.authentication</name>
-        <description>Security Authentication (simple/kerberos).</description>
-        <label>Security Authentication Type</label>
-        <placeholder>simple</placeholder>
-        <cluster-config>core-site/hadoop.security.authentication</cluster-config>
-               <default-value>simple</default-value>
-        <required>true</required>
-    </parameter>
-    <parameter>
-        <name>yarn.resourcemanager.address</name>
-        <description>yarn.resourcemanager.address</description>
-        <label>yarn.resourcemanager.address</label>
-        <placeholder>http://sandbox.hortonworks.com:8050</placeholder>
-        <cluster-config>yarn-site/yarn.resourcemanager.address</cluster-config>
-        <required>true</required>
-    </parameter>
+  <parameter>
+    <name>webhdfs.url</name>
+    <description>Enter the WebHDFS FileSystem URI. Typically this is the dfs.namenode.http-address
+      property in the hdfs-site.xml configuration. URL must be accessible from Ambari Server.
+    </description>
+    <label>WebHDFS FileSystem URI</label>
+    <required>true</required>
+    <cluster-config>core-site/fs.defaultFS</cluster-config>
+  </parameter>
+  <parameter>
+    <name>webhdfs.nameservices</name>
+    <description>Comma-separated list of nameservices. Value of hdfs-site/dfs.nameservices property</description>
+    <label>Logical name of the NameNode cluster</label>
+    <required>false</required>
+    <cluster-config>hdfs-site/dfs.nameservices</cluster-config>
+  </parameter>
+  <parameter>
+    <name>webhdfs.ha.namenodes.list</name>
+    <description>Comma-separated list of namenodes for a given nameservice.
+      Value of hdfs-site/dfs.ha.namenodes.[nameservice] property
+    </description>
+    <label>List of NameNodes</label>
+    <required>false</required>
+    <cluster-config>fake</cluster-config>
+  </parameter>
+  <parameter>
+    <name>webhdfs.ha.namenode.rpc-address.list</name>
+    <description>Comma-separated RPC addresses for the NameNodes.</description>
+    <label>Comma-separated NameNode RPC Addresses</label>
+    <required>false</required>
+    <cluster-config>fake</cluster-config>
+  </parameter>
+  <parameter>
+    <name>webhdfs.ha.namenode.http-address.list</name>
+    <description>Comma-separated WebHDFS addresses for the NameNodes.</description>
+    <label>Comma-separated NameNode HTTP (WebHDFS) Addresses</label>
+    <required>false</required>
+    <cluster-config>fake</cluster-config>
+  </parameter>
+  <parameter>
+    <name>webhdfs.ha.namenode.https-address.list</name>
+    <description>Comma-separated WebHDFS HTTPS addresses for the NameNodes.</description>
+    <label>Comma-separated NameNode HTTPS (WebHDFS) Addresses</label>
+    <required>false</required>
+    <cluster-config>fake</cluster-config>
+  </parameter>
+  <parameter>
+    <name>webhdfs.client.failover.proxy.provider</name>
+    <description>The Java class that HDFS clients use to contact the Active NameNode
+      Value of hdfs-site/dfs.client.failover.proxy.provider.[nameservice] property
+    </description>
+    <label>Failover Proxy Provider</label>
+    <required>false</required>
+    <cluster-config>fake</cluster-config>
+  </parameter>
+  <parameter>
+    <name>hdfs.auth_to_local</name>
+    <description>Auth to Local Configuration</description>
+    <label>Auth To Local</label>
+    <required>false</required>
+    <cluster-config>core-site/hadoop.security.auth_to_local</cluster-config>
+  </parameter>
+  <parameter>
+    <name>webhdfs.username</name>
+    <description>doAs for proxy user for HDFS. By default, uses the currently logged-in Ambari user.</description>
+    <label>WebHDFS Username</label>
+    <default-value>${username}</default-value>
+    <required>false</required>
+  </parameter>
+  <parameter>
+    <name>webhdfs.auth</name>
+    <description>Semicolon-separated authentication configs.</description>
+    <placeholder>auth=SIMPLE</placeholder>
+    <label>WebHDFS Authorization</label>
+    <required>false</required>
+  </parameter>
+  <parameter>
+    <name>hadoop.security.authentication</name>
+    <description>Security Authentication (simple/kerberos).</description>
+    <label>Security Authentication Type</label>
+    <placeholder>simple</placeholder>
+    <cluster-config>core-site/hadoop.security.authentication</cluster-config>
+    <default-value>simple</default-value>
+    <required>true</required>
+  </parameter>
+  <parameter>
+    <name>yarn.resourcemanager.address</name>
+    <description>yarn.resourcemanager.address</description>
+    <label>yarn.resourcemanager.address</label>
+    <placeholder>http://sandbox.hortonworks.com:8050</placeholder>
+    <cluster-config>yarn-site/yarn.resourcemanager.address</cluster-config>
+    <required>true</required>
+  </parameter>
 
   <parameter>
     <name>view.conf.keyvalues</name>
     <description>The key values that will be copied to hdfs connection configuration verbatim. Format : key1=value1;
-      key2=value2</description>
+      key2=value2
+    </description>
     <label>View Configs</label>
     <required>false</required>
   </parameter>
 
-    <persistence>
-        <entity>
-             <class>org.apache.oozie.ambari.view.workflowmanager.model.Workflow</class>
-            <id-property>id</id-property>
-        </entity>
-        <entity>
-            <class>org.apache.oozie.ambari.view.assets.model.ActionAssetDefinition</class>
-            <id-property>id</id-property>
-        </entity>
-        <entity>
-            <class>org.apache.oozie.ambari.view.assets.model.ActionAsset</class>
-            <id-property>id</id-property>
-        </entity>
+  <persistence>
+    <entity>
+      <class>org.apache.oozie.ambari.view.workflowmanager.model.Workflow</class>
+      <id-property>id</id-property>
+    </entity>
+    <entity>
+      <class>org.apache.oozie.ambari.view.assets.model.ActionAssetDefinition</class>
+      <id-property>id</id-property>
+    </entity>
+    <entity>
+      <class>org.apache.oozie.ambari.view.assets.model.ActionAsset</class>
+      <id-property>id</id-property>
+    </entity>
 
-    </persistence>
+  </persistence>
 
   <!--<auto-instance>
     <name>AUTO_OOZIE_VIEW</name>
