Use 'GetInstanceNodes' to get the nodes of an instance, instead of the
'all_nodes' Instance property.

Signed-off-by: Ilias Tsitsimpis <[email protected]>
---
 lib/cmdlib/cluster.py             | 23 ++++++++++++++---------
 lib/cmdlib/common.py              |  6 ++++--
 lib/cmdlib/group.py               |  8 ++++----
 lib/cmdlib/instance.py            | 26 ++++++++++++++------------
 lib/cmdlib/instance_operation.py  | 18 +++++++++++-------
 lib/cmdlib/instance_query.py      |  3 ++-
 lib/cmdlib/instance_storage.py    | 29 +++++++++++++++++------------
 lib/cmdlib/node.py                | 10 ++++++----
 lib/config.py                     |  4 ++--
 lib/objects.py                    | 29 -----------------------------
 test/py/cmdlib/cmdlib_unittest.py |  5 +++++
 11 files changed, 79 insertions(+), 82 deletions(-)

diff --git a/lib/cmdlib/cluster.py b/lib/cmdlib/cluster.py
index e2b834b..c86b0fb 100644
--- a/lib/cmdlib/cluster.py
+++ b/lib/cmdlib/cluster.py
@@ -944,9 +944,10 @@ class LUClusterSetParams(LogicalUnit):
       all_instances = self.cfg.GetAllInstancesInfo().values()
       violations = set()
       for group in self.cfg.GetAllNodeGroupsInfo().values():
-        instances = frozenset([inst for inst in all_instances
-                               if compat.any(nuuid in group.members
-                                             for nuuid in inst.all_nodes)])
+        instances = frozenset(
+          [inst for inst in all_instances
+           if compat.any(nuuid in group.members
+             for nuuid in self.cfg.GetInstanceNodes(inst))])
         new_ipolicy = objects.FillIPolicy(self.new_ipolicy, group.ipolicy)
         ipol = masterd.instance.CalculateGroupIPolicy(cluster, group)
         new = ComputeNewInstanceViolations(ipol, new_ipolicy, instances,
@@ -2024,7 +2025,8 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
 
     for inst in self.my_inst_info.values():
       if inst.disk_template in constants.DTS_INT_MIRROR:
-        for nuuid in inst.all_nodes:
+        inst_nodes = self.cfg.GetInstanceNodes(inst)
+        for nuuid in inst_nodes:
           if self.all_node_info[nuuid].group != self.group_uuid:
             extra_lv_nodes.add(nuuid)
 
@@ -2378,7 +2380,8 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
                   utils.CommaJoin(secondary_nodes),
                   code=self.ETYPE_WARNING)
 
-    es_flags = rpc.GetExclusiveStorageForNodes(self.cfg, instance.all_nodes)
+    inst_nodes = self.cfg.GetInstanceNodes(instance)
+    es_flags = rpc.GetExclusiveStorageForNodes(self.cfg, inst_nodes)
     if any(es_flags.values()):
       if instance.disk_template not in constants.DTS_EXCL_STORAGE:
         # Disk template not compatible with exclusive_storage: no instance
@@ -2399,7 +2402,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
                       " gnt-cluster repair-disk-sizes", idx)
 
     if instance.disk_template in constants.DTS_INT_MIRROR:
-      instance_nodes = utils.NiceSort(instance.all_nodes)
+      instance_nodes = utils.NiceSort(inst_nodes)
       instance_groups = {}
 
       for node_uuid in instance_nodes:
@@ -2436,7 +2439,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
                   instance.name, "instance has offline secondary node(s) %s",
                   utils.CommaJoin(self.cfg.GetNodeNames(inst_nodes_offline)))
     # ... or ghost/non-vm_capable nodes
-    for node_uuid in instance.all_nodes:
+    for node_uuid in inst_nodes:
       self._ErrorIf(node_image[node_uuid].ghost, constants.CV_EINSTANCEBADNODE,
                     instance.name, "instance lives on ghost node %s",
                     self.cfg.GetNodeName(node_uuid))
@@ -3136,7 +3139,8 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
       instdisk[inst_uuid] = {}
 
     assert compat.all(len(statuses) == len(instanceinfo[inst].disks) and
-                      len(nuuids) <= len(instanceinfo[inst].all_nodes) and
+                      len(nuuids) <= len(
+                        self.cfg.GetInstanceNodes(instanceinfo[inst])) and
                       compat.all(isinstance(s, (tuple, list)) and
                                  len(s) == 2 for s in statuses)
                       for inst, nuuids in instdisk.items()
@@ -3333,7 +3337,8 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
       if instance.admin_state == constants.ADMINST_OFFLINE:
         i_offline += 1
 
-      for nuuid in instance.all_nodes:
+      inst_nodes = self.cfg.GetInstanceNodes(instance)
+      for nuuid in inst_nodes:
         if nuuid not in node_image:
           gnode = self.NodeImage(uuid=nuuid)
           gnode.ghost = (nuuid not in self.all_node_info)
diff --git a/lib/cmdlib/common.py b/lib/cmdlib/common.py
index 7572a72..548be3d 100644
--- a/lib/cmdlib/common.py
+++ b/lib/cmdlib/common.py
@@ -614,7 +614,8 @@ def ComputeIPolicyInstanceViolation(ipolicy, instance, cfg,
   be_full = cfg.GetClusterInfo().FillBE(instance)
   mem_size = be_full[constants.BE_MAXMEM]
   cpu_count = be_full[constants.BE_VCPUS]
-  es_flags = rpc.GetExclusiveStorageForNodes(cfg, instance.all_nodes)
+  inst_nodes = cfg.GetInstanceNodes(instance)
+  es_flags = rpc.GetExclusiveStorageForNodes(cfg, inst_nodes)
   if any(es_flags.values()):
     # With exclusive storage use the actual spindles
     try:
@@ -856,7 +857,8 @@ def CheckInstancesNodeGroups(cfg, instances, owned_groups, owned_node_uuids,
 
   """
   for (uuid, inst) in instances.items():
-    assert owned_node_uuids.issuperset(inst.all_nodes), \
+    inst_nodes = cfg.GetInstanceNodes(inst)
+    assert owned_node_uuids.issuperset(inst_nodes), \
       "Instance %s's nodes changed while we kept the lock" % inst.name
 
     inst_groups = CheckInstanceNodeGroups(cfg, uuid, owned_groups)
diff --git a/lib/cmdlib/group.py b/lib/cmdlib/group.py
index ac4d28e..d8b30e6 100644
--- a/lib/cmdlib/group.py
+++ b/lib/cmdlib/group.py
@@ -276,8 +276,7 @@ class LUGroupAssignNodes(NoHooksLU):
 
     self.cfg.AssignGroupNodes(mods)
 
-  @staticmethod
-  def CheckAssignmentForSplitInstances(changes, node_data, instance_data):
+  def CheckAssignmentForSplitInstances(self, changes, node_data, instance_data):
     """Check for split instances after a node assignment.
 
     This method considers a series of node assignments as an atomic operation,
@@ -310,12 +309,13 @@ class LUGroupAssignNodes(NoHooksLU):
       if inst.disk_template not in constants.DTS_INT_MIRROR:
         continue
 
+      inst_nodes = self.cfg.GetInstanceNodes(inst)
       if len(set(node_data[node_uuid].group
-                 for node_uuid in inst.all_nodes)) > 1:
+                 for node_uuid in inst_nodes)) > 1:
         previously_split_instances.add(inst.uuid)
 
       if len(set(changed_nodes.get(node_uuid, node_data[node_uuid].group)
-                 for node_uuid in inst.all_nodes)) > 1:
+                 for node_uuid in inst_nodes)) > 1:
         all_split_instances.add(inst.uuid)
 
     return (list(all_split_instances - previously_split_instances),
diff --git a/lib/cmdlib/instance.py b/lib/cmdlib/instance.py
index 10b1b16..fd60c08 100644
--- a/lib/cmdlib/instance.py
+++ b/lib/cmdlib/instance.py
@@ -1629,7 +1629,7 @@ class LUInstanceCreate(LogicalUnit):
                    osparams_private=self.op.osparams_private,
                    osparams_secret=self.op.osparams_secret)
 
-    return self.cfg.GetNodeNames(list(iobj.all_nodes))
+    return self.cfg.GetNodeNames(list(self.cfg.GetInstanceNodes(iobj)))
 
 
 class LUInstanceRename(LogicalUnit):
@@ -1662,7 +1662,8 @@ class LUInstanceRename(LogicalUnit):
     """Build hooks nodes.
 
     """
-    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
+    nl = [self.cfg.GetMasterNode()] + \
+      list(self.cfg.GetInstanceNodes(self.instance))
     return (nl, nl)
 
   def CheckPrereq(self):
@@ -1746,7 +1747,7 @@ class LUInstanceRename(LogicalUnit):
     # update info on disks
     info = GetInstanceInfoText(renamed_inst)
     for (idx, disk) in enumerate(renamed_inst.disks):
-      for node_uuid in renamed_inst.all_nodes:
+      for node_uuid in self.cfg.GetInstanceNodes(renamed_inst):
         result = self.rpc.call_blockdev_setinfo(node_uuid,
                                                 (disk, renamed_inst), info)
         result.Warn("Error setting info on node %s for disk %s" %
@@ -1805,7 +1806,7 @@ class LUInstanceRemove(LogicalUnit):
 
     """
     nl = [self.cfg.GetMasterNode()]
-    nl_post = list(self.instance.all_nodes) + nl
+    nl_post = list(self.cfg.GetInstanceNodes(self.instance)) + nl
     return (nl, nl_post)
 
   def CheckPrereq(self):
@@ -1840,7 +1841,7 @@ class LUInstanceRemove(LogicalUnit):
 
     assert (self.owned_locks(locking.LEVEL_NODE) ==
             self.owned_locks(locking.LEVEL_NODE_RES))
-    assert not (set(self.instance.all_nodes) -
+    assert not (set(self.cfg.GetInstanceNodes(self.instance)) -
                 self.owned_locks(locking.LEVEL_NODE)), \
       "Not owning correct locks"
 
@@ -2678,7 +2679,8 @@ class LUInstanceSetParams(LogicalUnit):
     """Build hooks nodes.
 
     """
-    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
+    nl = [self.cfg.GetMasterNode()] + \
+        list(self.cfg.GetInstanceNodes(self.instance))
     return (nl, nl)
 
   def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
@@ -2893,9 +2895,9 @@ class LUInstanceSetParams(LogicalUnit):
     """
     self.diskparams = self.cfg.GetInstanceDiskParams(self.instance)
 
+    inst_nodes = self.cfg.GetInstanceNodes(self.instance)
     excl_stor = compat.any(
-      rpc.GetExclusiveStorageForNodes(self.cfg,
-                                      self.instance.all_nodes).values()
+      rpc.GetExclusiveStorageForNodes(self.cfg, inst_nodes).values()
       )
 
     # Check disk modifications. This is done here and not in CheckArguments
@@ -3059,7 +3061,7 @@ class LUInstanceSetParams(LogicalUnit):
                                    errors.ECODE_STATE)
 
     assert pnode_uuid in self.owned_locks(locking.LEVEL_NODE)
-    node_uuids = list(self.instance.all_nodes)
+    node_uuids = list(self.cfg.GetInstanceNodes(self.instance))
     pnode_info = self.cfg.GetNodeInfo(pnode_uuid)
 
     #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
@@ -3162,7 +3164,7 @@ class LUInstanceSetParams(LogicalUnit):
         hvspecs = [(self.instance.hypervisor,
                     self.cfg.GetClusterInfo()
                       .hvparams[self.instance.hypervisor])]
-        _CheckNodesPhysicalCPUs(self, self.instance.all_nodes,
+        _CheckNodesPhysicalCPUs(self, self.cfg.GetInstanceNodes(self.instance),
                                 max_requested_cpu + 1,
                                 hvspecs)
 
@@ -3737,7 +3739,7 @@ class LUInstanceSetParams(LogicalUnit):
 
     if self.op.disk_template:
       if __debug__:
-        check_nodes = set(self.instance.all_nodes)
+        check_nodes = set(self.cfg.GetInstanceNodes(self.instance))
         if self.op.remote_node_uuid:
           check_nodes.add(self.op.remote_node_uuid)
         for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
@@ -3899,7 +3901,7 @@ class LUInstanceChangeGroup(LogicalUnit):
     self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
 
     # Check if node groups for locked instance are still correct
-    assert owned_nodes.issuperset(self.instance.all_nodes), \
+    assert owned_nodes.issuperset(self.cfg.GetInstanceNodes(self.instance)), \
       ("Instance %s's nodes changed while we kept the lock" %
        self.op.instance_name)
 
diff --git a/lib/cmdlib/instance_operation.py b/lib/cmdlib/instance_operation.py
index 3043032..e3fd859 100644
--- a/lib/cmdlib/instance_operation.py
+++ b/lib/cmdlib/instance_operation.py
@@ -86,7 +86,8 @@ class LUInstanceStartup(LogicalUnit):
     """Build hooks nodes.
 
     """
-    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
+    nl = [self.cfg.GetMasterNode()] + \
+        list(self.cfg.GetInstanceNodes(self.instance))
     return (nl, nl)
 
   def CheckPrereq(self):
@@ -108,8 +109,8 @@ class LUInstanceStartup(LogicalUnit):
       filled_hvp.update(self.op.hvparams)
       hv_type = hypervisor.GetHypervisorClass(self.instance.hypervisor)
       hv_type.CheckParameterSyntax(filled_hvp)
-      CheckHVParams(self, self.instance.all_nodes, self.instance.hypervisor,
-                    filled_hvp)
+      CheckHVParams(self, self.cfg.GetInstanceNodes(self.instance),
+                    self.instance.hypervisor, filled_hvp)
 
     CheckInstanceState(self, self.instance, INSTANCE_ONLINE)
 
@@ -199,7 +200,8 @@ class LUInstanceShutdown(LogicalUnit):
     """Build hooks nodes.
 
     """
-    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
+    nl = [self.cfg.GetMasterNode()] + \
+      list(self.cfg.GetInstanceNodes(self.instance))
     return (nl, nl)
 
   def CheckPrereq(self):
@@ -275,7 +277,8 @@ class LUInstanceReinstall(LogicalUnit):
     """Build hooks nodes.
 
     """
-    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
+    nl = [self.cfg.GetMasterNode()] + \
+      list(self.cfg.GetInstanceNodes(self.instance))
     return (nl, nl)
 
   def CheckPrereq(self):
@@ -303,7 +306,7 @@ class LUInstanceReinstall(LogicalUnit):
 
   def _MergeValidateOsParams(self, instance):
     "Handle the OS parameter merging and validation for the target instance."
-    node_uuids = list(instance.all_nodes)
+    node_uuids = list(self.cfg.GetInstanceNodes(instance))
 
     self.op.osparams = self.op.osparams or {}
     self.op.osparams_private = self.op.osparams_private or {}
@@ -434,7 +437,8 @@ class LUInstanceReboot(LogicalUnit):
     """Build hooks nodes.
 
     """
-    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
+    nl = [self.cfg.GetMasterNode()] + \
+      list(self.cfg.GetInstanceNodes(self.instance))
     return (nl, nl)
 
   def CheckPrereq(self):
diff --git a/lib/cmdlib/instance_query.py b/lib/cmdlib/instance_query.py
index d327042..13d079f 100644
--- a/lib/cmdlib/instance_query.py
+++ b/lib/cmdlib/instance_query.py
@@ -207,7 +207,8 @@ class LUInstanceQueryData(NoHooksLU):
 
     cluster = self.cfg.GetClusterInfo()
 
-    node_uuids = itertools.chain(*(i.all_nodes for i in self.wanted_instances))
+    node_uuids = itertools.chain(*(self.cfg.GetInstanceNodes(i)
+                                   for i in self.wanted_instances))
     nodes = dict(self.cfg.GetMultiNodeInfo(node_uuids))
 
     groups = dict(self.cfg.GetMultiNodeGroupInfo(node.group
diff --git a/lib/cmdlib/instance_storage.py b/lib/cmdlib/instance_storage.py
index cb7613c..eea4c1b 100644
--- a/lib/cmdlib/instance_storage.py
+++ b/lib/cmdlib/instance_storage.py
@@ -226,7 +226,7 @@ def CreateDisks(lu, instance, to_skip=None, target_node_uuid=None, disks=None):
   info = GetInstanceInfoText(instance)
   if target_node_uuid is None:
     pnode_uuid = instance.primary_node
-    all_node_uuids = instance.all_nodes
+    all_node_uuids = lu.cfg.GetInstanceNodes(instance, disks=disks)
   else:
     pnode_uuid = target_node_uuid
     all_node_uuids = [pnode_uuid]
@@ -610,7 +610,7 @@ class LUInstanceRecreateDisks(LogicalUnit):
 
     ial.Run(self.op.iallocator)
 
-    assert req.RequiredNodes() == len(self.instance.all_nodes)
+    assert req.RequiredNodes() == len(self.cfg.GetInstanceNodes(self.instance))
 
     if not ial.success:
       raise errors.OpPrereqError("Can't compute nodes using iallocator '%s':"
@@ -711,7 +711,8 @@ class LUInstanceRecreateDisks(LogicalUnit):
     """Build hooks nodes.
 
     """
-    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
+    nl = [self.cfg.GetMasterNode()] + \
+      list(self.cfg.GetInstanceNodes(self.instance))
     return (nl, nl)
 
   def CheckPrereq(self):
@@ -724,10 +725,11 @@ class LUInstanceRecreateDisks(LogicalUnit):
     assert instance is not None, \
       "Cannot retrieve locked instance %s" % self.op.instance_name
     if self.op.node_uuids:
-      if len(self.op.node_uuids) != len(instance.all_nodes):
+      inst_nodes = self.cfg.GetInstanceNodes(instance)
+      if len(self.op.node_uuids) != len(inst_nodes):
         raise errors.OpPrereqError("Instance %s currently has %d nodes, but"
                                    " %d replacement nodes were specified" %
-                                   (instance.name, len(instance.all_nodes),
+                                   (instance.name, len(inst_nodes),
                                     len(self.op.node_uuids)),
                                    errors.ECODE_INVAL)
       assert instance.disk_template != constants.DT_DRBD8 or \
@@ -787,7 +789,7 @@ class LUInstanceRecreateDisks(LogicalUnit):
     if self.op.node_uuids:
       node_uuids = self.op.node_uuids
     else:
-      node_uuids = instance.all_nodes
+      node_uuids = self.cfg.GetInstanceNodes(instance)
     excl_stor = compat.any(
       rpc.GetExclusiveStorageForNodes(self.cfg, node_uuids).values()
       )
@@ -852,7 +854,8 @@ class LUInstanceRecreateDisks(LogicalUnit):
 
     # All touched nodes must be locked
     mylocks = self.owned_locks(locking.LEVEL_NODE)
-    assert mylocks.issuperset(frozenset(self.instance.all_nodes))
+    inst_nodes = self.cfg.GetInstanceNodes(self.instance)
+    assert mylocks.issuperset(frozenset(inst_nodes))
     new_disks = CreateDisks(self, self.instance, to_skip=to_skip)
 
     # TODO: Release node locks before wiping, or explain why it's not possible
@@ -1471,7 +1474,8 @@ class LUInstanceGrowDisk(LogicalUnit):
     """Build hooks nodes.
 
     """
-    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
+    nl = [self.cfg.GetMasterNode()] + \
+      list(self.cfg.GetInstanceNodes(self.instance))
     return (nl, nl)
 
   def CheckPrereq(self):
@@ -1483,7 +1487,7 @@ class LUInstanceGrowDisk(LogicalUnit):
     self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
     assert self.instance is not None, \
       "Cannot retrieve locked instance %s" % self.op.instance_name
-    node_uuids = list(self.instance.all_nodes)
+    node_uuids = list(self.cfg.GetInstanceNodes(self.instance))
     for node_uuid in node_uuids:
       CheckNodeOnline(self, node_uuid)
     self.node_es_flags = rpc.GetExclusiveStorageForNodes(self.cfg, node_uuids)
@@ -1544,7 +1548,8 @@ class LUInstanceGrowDisk(LogicalUnit):
                  utils.FormatUnit(self.target, "h")))
 
     # First run all grow ops in dry-run mode
-    for node_uuid in self.instance.all_nodes:
+    inst_nodes = self.cfg.GetInstanceNodes(self.instance)
+    for node_uuid in inst_nodes:
       result = self.rpc.call_blockdev_grow(node_uuid,
                                            (self.disk, self.instance),
                                            self.delta, True, True,
@@ -1576,7 +1581,7 @@ class LUInstanceGrowDisk(LogicalUnit):
 
     # We know that (as far as we can test) operations across different
     # nodes will succeed, time to run it for real on the backing storage
-    for node_uuid in self.instance.all_nodes:
+    for node_uuid in inst_nodes:
       result = self.rpc.call_blockdev_grow(node_uuid,
                                            (self.disk, self.instance),
                                            self.delta, False, True,
@@ -1992,7 +1997,7 @@ class TLReplaceDisks(Tasklet):
     @return: True if they are activated, False otherwise
 
     """
-    node_uuids = instance.all_nodes
+    node_uuids = self.cfg.GetInstanceNodes(instance)
 
     for idx, dev in enumerate(instance.disks):
       for node_uuid in node_uuids:
diff --git a/lib/cmdlib/node.py b/lib/cmdlib/node.py
index ad2d7cb..ed380f1 100644
--- a/lib/cmdlib/node.py
+++ b/lib/cmdlib/node.py
@@ -483,7 +483,7 @@ class LUNodeSetParams(LogicalUnit):
 
     """
     return (instance.disk_template in constants.DTS_INT_MIRROR and
-            self.op.node_uuid in instance.all_nodes)
+            self.op.node_uuid in self.cfg.GetInstanceNodes(instance))
 
   def ExpandNames(self):
     if self.lock_all:
@@ -867,7 +867,9 @@ def _GetNodeInstances(cfg, node_uuid):
 
   """
 
-  return _GetNodeInstancesInner(cfg, lambda inst: node_uuid in inst.all_nodes)
+  return _GetNodeInstancesInner(cfg,
+                                lambda inst: node_uuid in
+                                  cfg.GetInstanceNodes(inst))
 
 
 class LUNodeEvacuate(NoHooksLU):
@@ -1459,7 +1461,7 @@ class LUNodeRemove(LogicalUnit):
                                  " node is required", errors.ECODE_INVAL)
 
     for _, instance in self.cfg.GetAllInstancesInfo().items():
-      if node.uuid in instance.all_nodes:
+      if node.uuid in self.cfg.GetInstanceNodes(instance):
         raise errors.OpPrereqError("Instance %s is still running on the node,"
                                    " please remove first" % instance.name,
                                    errors.ECODE_INVAL)
@@ -1558,7 +1560,7 @@ class LURepairNodeStorage(NoHooksLU):
     for inst in _GetNodeInstances(self.cfg, self.op.node_uuid):
       if not inst.disks_active:
         continue
-      check_nodes = set(inst.all_nodes)
+      check_nodes = set(self.cfg.GetInstanceNodes(inst))
       check_nodes.discard(self.op.node_uuid)
       for inst_node_uuid in check_nodes:
         self._CheckFaultyDisks(inst, inst_node_uuid)
diff --git a/lib/config.py b/lib/config.py
index 6ba05c6..e50a930 100644
--- a/lib/config.py
+++ b/lib/config.py
@@ -1944,7 +1944,7 @@ class ConfigWriter(object):
     if primary_only:
       nodes = [instance.primary_node]
     else:
-      nodes = instance.all_nodes
+      nodes = self._UnlockedGetInstanceNodes(instance)
 
     return frozenset(self._UnlockedGetNodeInfo(node_uuid).group
                      for node_uuid in nodes)
@@ -2213,7 +2213,7 @@ class ConfigWriter(object):
     if primary_only:
       nodes_fn = lambda inst: [inst.primary_node]
     else:
-      nodes_fn = lambda inst: inst.all_nodes
+      nodes_fn = self._UnlockedGetInstanceNodes
 
     return frozenset(inst.uuid
                      for inst in self._ConfigData().instances.values()
diff --git a/lib/objects.py b/lib/objects.py
index d6e06d7..529bec5 100644
--- a/lib/objects.py
+++ b/lib/objects.py
@@ -1158,35 +1158,6 @@ class Instance(TaggableObject):
     "serial_no",
     ] + _TIMESTAMPS + _UUID
 
-  def _ComputeAllNodes(self):
-    """Compute the list of all nodes.
-
-    Since the data is already there (in the drbd disks), keeping it as
-    a separate normal attribute is redundant and if not properly
-    synchronised can cause problems. Thus it's better to compute it
-    dynamically.
-
-    """
-    def _Helper(nodes, device):
-      """Recursively computes nodes given a top device."""
-      if device.dev_type in constants.DTS_DRBD:
-        nodea, nodeb = device.logical_id[:2]
-        nodes.add(nodea)
-        nodes.add(nodeb)
-      if device.children:
-        for child in device.children:
-          _Helper(nodes, child)
-
-    all_nodes = set()
-    for device in self.disks:
-      _Helper(all_nodes, device)
-    # ensure that the primary node is always the first
-    all_nodes.discard(self.primary_node)
-    return (self.primary_node, ) + tuple(all_nodes)
-
-  all_nodes = property(_ComputeAllNodes, None, None,
-                       "List of names of all the nodes of the instance")
-
   def MapLVsByNode(self, lvmap=None, devs=None, node_uuid=None):
     """Provide a mapping of nodes to LVs this instance owns.
 
diff --git a/test/py/cmdlib/cmdlib_unittest.py b/test/py/cmdlib/cmdlib_unittest.py
index afb8682..caf73f7 100755
--- a/test/py/cmdlib/cmdlib_unittest.py
+++ b/test/py/cmdlib/cmdlib_unittest.py
@@ -588,6 +588,9 @@ class _FakeConfigForComputeIPolicyInstanceViolation:
       constants.ND_EXCLUSIVE_STORAGE: self.excl_stor,
       }
 
+  def GetInstanceNodes(self, instance):
+    return (instance.primary_node,)
+
 
 class TestComputeIPolicyInstanceViolation(unittest.TestCase):
   def test(self):
@@ -599,6 +602,7 @@ class TestComputeIPolicyInstanceViolation(unittest.TestCase):
     disks = [objects.Disk(size=512, spindles=13)]
     cfg = _FakeConfigForComputeIPolicyInstanceViolation(beparams, False)
     instance = objects.Instance(beparams=beparams, disks=disks, nics=[],
+                                primary_node="pnode_uuid",
                                 disk_template=constants.DT_PLAIN)
     stub = _StubComputeIPolicySpecViolation(2048, 2, 1, 0, [512], 4,
                                             constants.DT_PLAIN)
@@ -606,6 +610,7 @@ class TestComputeIPolicyInstanceViolation(unittest.TestCase):
                                                  cfg, _compute_fn=stub)
     self.assertEqual(ret, [])
     instance2 = objects.Instance(beparams={}, disks=disks, nics=[],
+                                 primary_node="pnode_uuid",
                                  disk_template=constants.DT_PLAIN)
     ret = common.ComputeIPolicyInstanceViolation(NotImplemented, instance2,
                                                  cfg, _compute_fn=stub)
-- 
1.9.1

Reply via email to