This fixes the problem that the previous patch was submitted against the wrong version. It also fixes a bug where the node was accidentally not looked up properly in the ssh_port_map.
Signed-off-by: Helga Velroyen <[email protected]> --- lib/backend.py | 7 ++----- lib/cmdlib/cluster/verify.py | 16 +++------------- lib/cmdlib/node.py | 6 +----- lib/rpc_defs.py | 4 ---- lib/server/noded.py | 5 ++--- test/py/ganeti.backend_unittest.py | 4 ++-- 6 files changed, 10 insertions(+), 32 deletions(-) diff --git a/lib/backend.py b/lib/backend.py index b53fcb6..64b55e0 100644 --- a/lib/backend.py +++ b/lib/backend.py @@ -1100,7 +1100,7 @@ def _VerifySshClutter(node_status_list, my_name): return result -def VerifyNode(what, cluster_name, all_hvparams, node_groups): +def VerifyNode(what, cluster_name, all_hvparams): """Verify the status of the local node. Based on the input L{what} parameter, various checks are done on the @@ -1128,9 +1128,6 @@ def VerifyNode(what, cluster_name, all_hvparams, node_groups): @param cluster_name: the cluster's name @type all_hvparams: dict of dict of strings @param all_hvparams: a dictionary mapping hypervisor names to hvparams - @type node_groups: a dict of strings - @param node_groups: node _names_ mapped to their group uuids (it's enough to - have only those nodes that are in `what["nodelist"]`) @rtype: dict @return: a dictionary with the same keys as the input dict, and values representing the result of the checks @@ -1183,7 +1180,7 @@ def VerifyNode(what, cluster_name, all_hvparams, node_groups): # over which Ganeti has no power. if my_name in mcs: success, message = _GetSshRunner(cluster_name). 
\ - VerifyNodeHostname(node, ssh_port_map) + VerifyNodeHostname(node, ssh_port_map[node]) if not success: val[node] = message diff --git a/lib/cmdlib/cluster/verify.py b/lib/cmdlib/cluster/verify.py index 2b30772..e6fc13f 100644 --- a/lib/cmdlib/cluster/verify.py +++ b/lib/cmdlib/cluster/verify.py @@ -1963,10 +1963,6 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors): if self._exclusive_storage: node_verify_param[constants.NV_EXCLUSIVEPVS] = True - node_group_uuids = dict(map(lambda n: (n.name, n.group), - self.cfg.GetAllNodesInfo().values())) - groups_config = self.cfg.GetAllNodeGroupsInfoDict() - # At this point, we have the in-memory data structures complete, # except for the runtime information, which we'll gather next @@ -2000,9 +1996,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors): all_nvinfo = self.rpc.call_node_verify(self.my_node_uuids, node_verify_param, cluster_name, - hvparams, - node_group_uuids, - groups_config) + hvparams) nvinfo_endtime = time.time() if self.extra_lv_nodes and vg_name is not None: @@ -2012,9 +2006,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors): self.rpc.call_node_verify(self.extra_lv_nodes, {constants.NV_LVLIST: vg_name}, self.cfg.GetClusterName(), - self.cfg.GetClusterInfo().hvparams, - node_group_uuids, - groups_config) + self.cfg.GetClusterInfo().hvparams) else: extra_lv_nvinfo = {} @@ -2042,9 +2034,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors): feedback_fn("* Gathering information about the master node") vf_nvinfo.update(self.rpc.call_node_verify( additional_node_uuids, {key: node_verify_param[key]}, - self.cfg.GetClusterName(), self.cfg.GetClusterInfo().hvparams, - node_group_uuids, - groups_config)) + self.cfg.GetClusterName(), self.cfg.GetClusterInfo().hvparams)) else: vf_nvinfo = all_nvinfo vf_node_info = self.my_node_info.values() diff --git a/lib/cmdlib/node.py b/lib/cmdlib/node.py index 50e86ce..111de97 100644 --- a/lib/cmdlib/node.py +++ b/lib/cmdlib/node.py @@ -311,7 +311,6 
@@ class LUNodeAdd(LogicalUnit): result = rpcrunner.call_node_verify_light( [node_name], vparams, cname, self.cfg.GetClusterInfo().hvparams, - {node_name: self.node_group}, )[node_name] (errmsgs, _) = CheckNodePVs(result.payload, excl_stor) if errmsgs: @@ -437,10 +436,7 @@ class LUNodeAdd(LogicalUnit): result = self.rpc.call_node_verify( node_verifier_uuids, node_verify_param, self.cfg.GetClusterName(), - self.cfg.GetClusterInfo().hvparams, - {self.new_node.name: self.cfg.LookupNodeGroup(self.node_group)}, - self.cfg.GetAllNodeGroupsInfoDict() - ) + self.cfg.GetClusterInfo().hvparams) for verifier in node_verifier_uuids: result[verifier].Raise("Cannot communicate with node %s" % verifier) nl_payload = result[verifier].payload[constants.NV_NODELIST] diff --git a/lib/rpc_defs.py b/lib/rpc_defs.py index aceb768..021807c 100644 --- a/lib/rpc_defs.py +++ b/lib/rpc_defs.py @@ -513,7 +513,6 @@ _NODE_CALLS = [ ("checkdict", None, "What to verify"), ("cluster_name", None, "Cluster name"), ("all_hvparams", None, "Dictionary mapping hypervisor names to hvparams"), - ("node_groups", None, "node names mapped to their group uuids"), ], None, None, "Request verification of given parameters"), ("node_volumes", MULTI, None, constants.RPC_TMO_FAST, [], None, None, "Gets all volumes on node(s)"), @@ -681,9 +680,6 @@ CALLS = { ("checkdict", None, "What to verify"), ("cluster_name", None, "Cluster name"), ("hvparams", None, "Dictionary mapping hypervisor names to hvparams"), - ("node_groups", None, "node names mapped to their group uuids"), - ("groups_cfg", None, - "a dictionary mapping group uuids to their configuration"), ], None, None, "Request verification of given parameters"), ]), "RpcClientConfig": _Prepare([ diff --git a/lib/server/noded.py b/lib/server/noded.py index 4ce0421..c73897c 100644 --- a/lib/server/noded.py +++ b/lib/server/noded.py @@ -821,9 +821,8 @@ class NodeRequestHandler(http.server.HttpServerHandler): """Run a verify sequence on this node. 
""" - (what, cluster_name, hvparams, node_groups) = params - return backend.VerifyNode(what, cluster_name, hvparams, - node_groups) + (what, cluster_name, hvparams) = params + return backend.VerifyNode(what, cluster_name, hvparams) @classmethod def perspective_node_verify_light(cls, params): diff --git a/test/py/ganeti.backend_unittest.py b/test/py/ganeti.backend_unittest.py index 9f58957..1a0a51d 100755 --- a/test/py/ganeti.backend_unittest.py +++ b/test/py/ganeti.backend_unittest.py @@ -135,7 +135,7 @@ class TestNodeVerify(testutils.GanetiTestCase): local_data = (netutils.Hostname.GetSysName(), constants.IP4_ADDRESS_LOCALHOST) result = backend.VerifyNode({constants.NV_MASTERIP: local_data}, - None, {}, {}) + None, {}) self.failUnless(constants.NV_MASTERIP in result, "Master IP data not returned") self.failUnless(result[constants.NV_MASTERIP], "Cannot reach localhost") @@ -147,7 +147,7 @@ class TestNodeVerify(testutils.GanetiTestCase): # we just test that whatever TcpPing returns, VerifyNode returns too netutils.TcpPing = lambda a, b, source=None: False result = backend.VerifyNode({constants.NV_MASTERIP: bad_data}, - None, {}, {}) + None, {}) self.failUnless(constants.NV_MASTERIP in result, "Master IP data not returned") self.failIf(result[constants.NV_MASTERIP], -- 2.6.0.rc2.230.g3dd15c0
