Repository: ambari
Updated Branches:
  refs/heads/trunk b722ffa6f -> d6d247983


AMBARI-19736. Format ZKFC commands failing while enabling NameNode HA (stoader)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/d6d24798
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/d6d24798
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/d6d24798

Branch: refs/heads/trunk
Commit: d6d247983c5c66c11260a6d10e7b0e2746fa8146
Parents: b722ffa
Author: Toader, Sebastian <stoa...@hortonworks.com>
Authored: Mon Jan 30 15:54:20 2017 +0100
Committer: Toader, Sebastian <stoa...@hortonworks.com>
Committed: Mon Jan 30 15:54:20 2017 +0100

----------------------------------------------------------------------
 .../2.1.0.2.0/package/scripts/hdfs_namenode.py  |  4 ++
 .../HDFS/2.1.0.2.0/package/scripts/utils.py     | 14 ++++---
 .../2.1.0.2.0/package/scripts/zkfc_slave.py     |  5 ---
 .../3.0.0.3.0/package/scripts/hdfs_namenode.py  |  4 ++
 .../HDFS/3.0.0.3.0/package/scripts/utils.py     | 42 +++++++++++---------
 .../3.0.0.3.0/package/scripts/zkfc_slave.py     |  7 ----
 .../2.0.6/hooks/before-ANY/scripts/params.py    | 16 ++++++--
 .../HDP/3.0/hooks/before-ANY/scripts/params.py  | 19 +++++++--
 8 files changed, 68 insertions(+), 43 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/d6d24798/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
 
b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
index a2edf38..0489792 100644
--- 
a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
+++ 
b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
@@ -34,6 +34,7 @@ from resource_management.libraries.functions import Direction
 from ambari_commons import OSCheck, OSConst
 from ambari_commons.os_family_impl import OsFamilyImpl, OsFamilyFuncImpl
 from utils import get_dfsadmin_base_command
+from utils import set_up_zkfc_security
 
 if OSCheck.is_windows_family():
   from resource_management.libraries.functions.windows_service_utils import 
check_windows_service_status
@@ -95,6 +96,9 @@ def namenode(action=None, hdfs_binary=None, do_format=True, 
upgrade_type=None,
     #we need this directory to be present before any action(HA manual steps for
     #additional namenode)
     create_name_dirs(params.dfs_name_dir)
+
+    # set up failover / secure zookeeper ACLs, this feature is supported from 
HDP 2.6 onwards
+    set_up_zkfc_security(params)
   elif action == "start":
     Logger.info("Called service {0} with upgrade_type: {1}".format(action, 
str(upgrade_type)))
     setup_ranger_hdfs(upgrade_type=upgrade_type)

http://git-wip-us.apache.org/repos/asf/ambari/blob/d6d24798/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/utils.py
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/utils.py
 
b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/utils.py
index 03aba7b..d8d0515 100644
--- 
a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/utils.py
+++ 
b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/utils.py
@@ -387,20 +387,24 @@ def get_dfsadmin_base_command(hdfs_binary, 
use_specific_namenode = False):
 def set_up_zkfc_security(params):
     """ Sets up security for accessing zookeper on secure clusters """
 
-    # check if the namenode is HA (this may be redundant as the component is 
only installed if affirmative)
+    if params.stack_supports_zk_security is False:
+      Logger.info("Skipping setting up secure ZNode ACL for HDFS as it's 
supported only for HDP 2.6 and above.")
+      return
+
+    # check if the namenode is HA
     if params.dfs_ha_enabled is False:
-        Logger.info("The namenode is not HA, zkfc security setup skipped.")
+        Logger.info("Skipping setting up secure ZNode ACL for HDFS as it's 
supported only for NameNode HA mode.")
         return
 
     # check if the cluster is secure (skip otherwise)
     if params.security_enabled is False:
-        Logger.info("The cluster is not secure, zkfc security setup skipped.")
+        Logger.info("Skipping setting up secure ZNode ACL for HDFS as it's 
supported only for secure clusters.")
         return
 
     # process the JAAS template
     File(os.path.join(params.hadoop_conf_secure_dir, 'hdfs_jaas.conf'),
-         owner='root',
-         group='root',
+         owner=params.hdfs_user,
+         group=params.user_group,
          mode=0644,
          content=Template("hdfs_jaas.conf.j2")
          )

http://git-wip-us.apache.org/repos/asf/ambari/blob/d6d24798/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/zkfc_slave.py
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/zkfc_slave.py
 
b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/zkfc_slave.py
index bfc9429..c2ff457 100644
--- 
a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/zkfc_slave.py
+++ 
b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/zkfc_slave.py
@@ -62,11 +62,6 @@ class ZkfcSlave(Script):
     import params
     env.set_params(params)
     hdfs("zkfc_slave")
-
-    # set up failover /  zookeper ACLs, this feature is supported from HDP 2.6 
ownwards
-    if params.stack_supports_zk_security:
-      utils.set_up_zkfc_security(params)
-
     pass
 
 @OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)

http://git-wip-us.apache.org/repos/asf/ambari/blob/d6d24798/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_namenode.py
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_namenode.py
 
b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_namenode.py
index 7fae57f..a0ed658 100644
--- 
a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_namenode.py
+++ 
b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_namenode.py
@@ -34,6 +34,7 @@ from resource_management.libraries.functions import Direction
 from ambari_commons import OSCheck, OSConst
 from ambari_commons.os_family_impl import OsFamilyImpl, OsFamilyFuncImpl
 from utils import get_dfsadmin_base_command
+from utils import set_up_zkfc_security
 
 if OSCheck.is_windows_family():
   from resource_management.libraries.functions.windows_service_utils import 
check_windows_service_status
@@ -95,6 +96,9 @@ def namenode(action=None, hdfs_binary=None, do_format=True, 
upgrade_type=None,
     #we need this directory to be present before any action(HA manual steps for
     #additional namenode)
     create_name_dirs(params.dfs_name_dir)
+
+    # set up failover / secure zookeeper ACLs, this feature is supported from 
HDP 2.6 onwards
+    set_up_zkfc_security(params)
   elif action == "start":
     Logger.info("Called service {0} with upgrade_type: {1}".format(action, 
str(upgrade_type)))
     setup_ranger_hdfs(upgrade_type=upgrade_type)

http://git-wip-us.apache.org/repos/asf/ambari/blob/d6d24798/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/utils.py
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/utils.py
 
b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/utils.py
index 9eebe63..48f5a1f 100644
--- 
a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/utils.py
+++ 
b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/utils.py
@@ -386,23 +386,27 @@ def get_dfsadmin_base_command(hdfs_binary, 
use_specific_namenode = False):
 
 
 def set_up_zkfc_security(params):
-    """ Sets up security for accessing zookeper on secure clusters """
-
-    # check if the namenode is HA (this may be redundant as the component is 
only installed if affirmative)
-    if params.dfs_ha_enabled is False:
-        Logger.info("The namenode is not HA, zkfc security setup skipped.")
-        return
-
-    # check if the cluster is secure (skip otherwise)
-    if params.security_enabled is False:
-        Logger.info("The cluster is not secure, zkfc security setup skipped.")
-        return
-
-    # process the JAAS template
-    File(os.path.join(params.hadoop_conf_secure_dir, 'hdfs_jaas.conf'),
-         owner='root',
-         group='root',
-         mode=0644,
-         content=Template("hdfs_jaas.conf.j2")
-         )
+  """ Sets up security for accessing zookeeper on secure clusters """
+
+  if params.stack_supports_zk_security is False:
+    Logger.info("Skipping setting up secure ZNode ACL for HDFS as it's 
supported only for HDP 2.6 and above.")
+    return
+
+  # check if the namenode is HA
+  if params.dfs_ha_enabled is False:
+    Logger.info("Skipping setting up secure ZNode ACL for HDFS as it's 
supported only for NameNode HA mode.")
+    return
+
+  # check if the cluster is secure (skip otherwise)
+  if params.security_enabled is False:
+    Logger.info("Skipping setting up secure ZNode ACL for HDFS as it's 
supported only for secure clusters.")
+    return
+
+  # process the JAAS template
+  File(os.path.join(params.hadoop_conf_secure_dir, 'hdfs_jaas.conf'),
+       owner=params.hdfs_user,
+       group=params.user_group,
+       mode=0644,
+       content=Template("hdfs_jaas.conf.j2")
+       )
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/d6d24798/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/zkfc_slave.py
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/zkfc_slave.py
 
b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/zkfc_slave.py
index f2ea6ad..6a0d71f 100644
--- 
a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/zkfc_slave.py
+++ 
b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/zkfc_slave.py
@@ -36,9 +36,7 @@ from resource_management.libraries.functions.security_commons 
import get_params_
 from resource_management.libraries.functions.security_commons import 
validate_security_config_properties
 from resource_management.libraries.functions.security_commons import 
FILE_TYPE_XML
 from resource_management.libraries.functions.stack_features import 
check_stack_feature
-from resource_management.libraries.functions.version import compare_versions
 from resource_management.libraries.script import Script
-from resource_management.libraries.functions.version_select_util import 
get_component_version
 from resource_management.core.resources.zkmigrator import ZkMigrator
 
 class ZkfcSlave(Script):
@@ -62,11 +60,6 @@ class ZkfcSlave(Script):
     import params
     env.set_params(params)
     hdfs("zkfc_slave")
-
-    # set up failover /  zookeper ACLs, this feature is supported from HDP 2.6 
ownwards
-    if params.stack_supports_zk_security:
-      utils.set_up_zkfc_security(params)
-
     pass
 
 @OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)

http://git-wip-us.apache.org/repos/asf/ambari/blob/d6d24798/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/params.py
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/params.py
 
b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/params.py
index 8e0e783..a748b33 100644
--- 
a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/params.py
+++ 
b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/params.py
@@ -182,7 +182,6 @@ oozie_servers = default("/clusterHostInfo/oozie_server", [])
 falcon_server_hosts = default("/clusterHostInfo/falcon_server_hosts", [])
 ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
 zeppelin_master_hosts = default("/clusterHostInfo/zeppelin_master_hosts", [])
-zkfc_hosts = default("/clusterHostInfo/zkfc_hosts", [])
 
 # get the correct version to use for checking stack features
 version_for_stack_feature_checks = get_stack_feature_version(config)
@@ -196,9 +195,20 @@ has_oozie_server = not len(oozie_servers) == 0
 has_falcon_server_hosts = not len(falcon_server_hosts) == 0
 has_ranger_admin = not len(ranger_admin_hosts) == 0
 has_zeppelin_master = not len(zeppelin_master_hosts) == 0
-has_zkfc_hosts = not len(zkfc_hosts)== 0
 stack_supports_zk_security = 
check_stack_feature(StackFeature.SECURE_ZOOKEEPER, 
version_for_stack_feature_checks)
 
+# HDFS High Availability properties
+dfs_ha_enabled = False
+dfs_ha_nameservices = 
default('/configurations/hdfs-site/dfs.internal.nameservices', None)
+if dfs_ha_nameservices is None:
+  dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.nameservices', 
None)
+dfs_ha_namenode_ids = 
default(format("/configurations/hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"),
 None)
+if dfs_ha_namenode_ids:
+  dfs_ha_namemodes_ids_list = dfs_ha_namenode_ids.split(",")
+  dfs_ha_namenode_ids_array_len = len(dfs_ha_namemodes_ids_list)
+  if dfs_ha_namenode_ids_array_len > 1:
+    dfs_ha_enabled = True
+
 if has_namenode or dfs_type == 'HCFS':
     hadoop_conf_dir = 
conf_select.get_hadoop_conf_dir(force_latest_on_upgrade=True)
     hadoop_conf_secure_dir = os.path.join(hadoop_conf_dir, "secure")
@@ -246,5 +256,5 @@ tez_am_view_acls = 
config['configurations']['tez-site']["tez.am.view-acls"]
 override_uid = str(default("/configurations/cluster-env/override_uid", 
"true")).lower()
 
 # if NN HA on secure clutser, access Zookeper securely
-if stack_supports_zk_security and has_zkfc_hosts and security_enabled:
+if stack_supports_zk_security and dfs_ha_enabled and security_enabled:
     hadoop_zkfc_opts=format("-Dzookeeper.sasl.client=true 
-Dzookeeper.sasl.client.username=zookeeper 
-Djava.security.auth.login.config={hadoop_conf_secure_dir}/hdfs_jaas.conf 
-Dzookeeper.sasl.clientconfig=Client")

http://git-wip-us.apache.org/repos/asf/ambari/blob/d6d24798/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/params.py
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/params.py
 
b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/params.py
index 8e5d210..9be9101 100644
--- 
a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/params.py
+++ 
b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/params.py
@@ -178,7 +178,6 @@ oozie_servers = default("/clusterHostInfo/oozie_server", [])
 falcon_server_hosts = default("/clusterHostInfo/falcon_server_hosts", [])
 ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
 zeppelin_master_hosts = default("/clusterHostInfo/zeppelin_master_hosts", [])
-zkfc_hosts = default("/clusterHostInfo/zkfc_hosts", [])
 
 # get the correct version to use for checking stack features
 version_for_stack_feature_checks = get_stack_feature_version(config)
@@ -191,9 +190,21 @@ has_oozie_server = not len(oozie_servers) == 0
 has_falcon_server_hosts = not len(falcon_server_hosts) == 0
 has_ranger_admin = not len(ranger_admin_hosts) == 0
 has_zeppelin_master = not len(zeppelin_master_hosts) == 0
-has_zkfc_hosts = not len(zkfc_hosts)== 0
 stack_supports_zk_security = 
check_stack_feature(StackFeature.SECURE_ZOOKEEPER, 
version_for_stack_feature_checks)
 
+# HDFS High Availability properties
+dfs_ha_enabled = False
+dfs_ha_nameservices = 
default('/configurations/hdfs-site/dfs.internal.nameservices', None)
+if dfs_ha_nameservices is None:
+  dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.nameservices', 
None)
+dfs_ha_namenode_ids = 
default(format("/configurations/hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"),
 None)
+if dfs_ha_namenode_ids:
+  dfs_ha_namemodes_ids_list = dfs_ha_namenode_ids.split(",")
+  dfs_ha_namenode_ids_array_len = len(dfs_ha_namemodes_ids_list)
+  if dfs_ha_namenode_ids_array_len > 1:
+    dfs_ha_enabled = True
+
+
 if has_namenode or dfs_type == 'HCFS':
     hadoop_conf_dir = 
conf_select.get_hadoop_conf_dir(force_latest_on_upgrade=True)
     hadoop_conf_secure_dir = os.path.join(hadoop_conf_dir, "secure")
@@ -241,5 +252,5 @@ tez_am_view_acls = 
config['configurations']['tez-site']["tez.am.view-acls"]
 override_uid = str(default("/configurations/cluster-env/override_uid", 
"true")).lower()
 
 # if NN HA on secure clutser, access Zookeper securely
-if stack_supports_zk_security and has_zkfc_hosts and security_enabled:
-    hadoop_zkfc_opts=format("-Dzookeeper.sasl.client=true 
-Dzookeeper.sasl.client.username=zookeeper 
-Djava.security.auth.login.config={hadoop_conf_secure_dir}/hdfs_jaas.conf 
-Dzookeeper.sasl.clientconfig=Client")
+if stack_supports_zk_security and dfs_ha_enabled and security_enabled:
+  hadoop_zkfc_opts=format("-Dzookeeper.sasl.client=true 
-Dzookeeper.sasl.client.username=zookeeper 
-Djava.security.auth.login.config={hadoop_conf_secure_dir}/hdfs_jaas.conf 
-Dzookeeper.sasl.clientconfig=Client")

Reply via email to