This patch replaces all uses of 'instance.disks' with 'GetInstanceDisks'
throughout the codebase. From now on, the 'GetInstanceDisks' method of
the config object has to be used to retrieve the disks of an instance,
and the 'AddInstDisk'/'RemoveInstDisk' methods have to be used to
add/remove a disk object to/from the configuration.
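
As an illustration (a sketch, not part of the patch itself), logical-unit
code that used to read the disks directly off the instance object:

  for idx, disk in enumerate(self.instance.disks):
    feedback_fn("found disk/%d" % idx)

now has to fetch them through the config object:

  disks = self.cfg.GetInstanceDisks(self.instance)
  for idx, disk in enumerate(disks):
    feedback_fn("found disk/%d" % idx)

Likewise, a disk object is attached to/detached from an instance via the
config object, and the instance has to be re-read afterwards:

  self.cfg.AddInstDisk(self.instance, new_disk, idx=0)
  self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)

  self.cfg.RemoveInstDisk(self.instance, disk_uuid)
  self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)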

Signed-off-by: Ilias Tsitsimpis <[email protected]>
---
 lib/cmdlib/backup.py             |   7 ++-
 lib/cmdlib/cluster.py            |   6 +-
 lib/cmdlib/common.py             |  10 ++--
 lib/cmdlib/group.py              |   5 +-
 lib/cmdlib/instance.py           | 124 +++++++++++++++++++++++++--------------
 lib/cmdlib/instance_migration.py |  20 ++++---
 lib/cmdlib/instance_query.py     |   2 +-
 lib/cmdlib/instance_storage.py   |  87 ++++++++++++++++-----------
 lib/cmdlib/instance_utils.py     |  15 +++--
 lib/config.py                    |  93 ++++++++++-------------------
 lib/masterd/iallocator.py        |   6 +-
 lib/masterd/instance.py          |   9 ++-
 lib/objects.py                   |  18 +++---
 lib/rpc/node.py                  |   3 +-
 src/Ganeti/Config.hs             |   5 +-
 src/Ganeti/Objects.hs            |   2 +-
 16 files changed, 228 insertions(+), 184 deletions(-)

diff --git a/lib/cmdlib/backup.py b/lib/cmdlib/backup.py
index 0bfdfeb..d9e845c 100644
--- a/lib/cmdlib/backup.py
+++ b/lib/cmdlib/backup.py
@@ -277,7 +277,7 @@ class LUBackupExport(LogicalUnit):
 
     # instance disk type verification
     # TODO: Implement export support for file-based disks
-    for disk in self.instance.disks:
+    for disk in self.cfg.GetInstanceDisks(self.instance):
       if disk.dev_type in constants.DTS_FILEBASED:
         raise errors.OpPrereqError("Export not supported for instances with"
                                    " file-based disks", errors.ECODE_INVAL)
@@ -315,7 +315,7 @@ class LUBackupExport(LogicalUnit):
                                    " only with the --zero-free-space option")
 
     self.secondary_nodes = self.cfg.GetInstanceSecondaryNodes(self.instance)
-    self.inst_disks = self.instance.disks
+    self.inst_disks = self.cfg.GetInstanceDisks(self.instance)
 
   def _CleanupExports(self, feedback_fn):
     """Removes exports of current instance from all other nodes.
@@ -401,7 +401,8 @@ class LUBackupExport(LogicalUnit):
     @return: Size of the disks in MiB
 
     """
-    return sum([d.size for d in self.instance.disks])
+    inst_disks = self.cfg.GetInstanceDisks(self.instance)
+    return sum([d.size for d in inst_disks])
 
   def ZeroFreeSpace(self, feedback_fn):
     """Zeroes the free space on a shutdown instance.
diff --git a/lib/cmdlib/cluster.py b/lib/cmdlib/cluster.py
index 087d9a5..eca5e49 100644
--- a/lib/cmdlib/cluster.py
+++ b/lib/cmdlib/cluster.py
@@ -611,7 +611,7 @@ class LUClusterRepairDiskSizes(NoHooksLU):
       pnode = instance.primary_node
       if pnode not in per_node_disks:
         per_node_disks[pnode] = []
-      for idx, disk in enumerate(instance.disks):
+      for idx, disk in enumerate(self.cfg.GetInstanceDisks(instance)):
         per_node_disks[pnode].append((instance, idx, disk))
 
     assert not (frozenset(per_node_disks.keys()) -
@@ -2394,7 +2394,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
                     " that have exclusive storage set: %s",
                     instance.disk_template,
                     utils.CommaJoin(self.cfg.GetNodeNames(es_nodes)))
-      for (idx, disk) in enumerate(instance.disks):
+      for (idx, disk) in enumerate(self.cfg.GetInstanceDisks(instance)):
         self._ErrorIf(disk.spindles is None,
                       constants.CV_EINSTANCEMISSINGCFGPARAMETER, instance.name,
                       "number of spindles not configured for disk %s while"
@@ -3072,7 +3072,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
                                if instanceinfo[uuid].disk_template == diskless)
       disks = [(inst_uuid, disk)
                for inst_uuid in node_inst_uuids
-               for disk in instanceinfo[inst_uuid].disks]
+               for disk in self.cfg.GetInstanceDisks(instanceinfo[inst_uuid])]
 
       if not disks:
         nodisk_instances.update(uuid for uuid in node_inst_uuids
diff --git a/lib/cmdlib/common.py b/lib/cmdlib/common.py
index 07d38e2..ff38030 100644
--- a/lib/cmdlib/common.py
+++ b/lib/cmdlib/common.py
@@ -616,10 +616,11 @@ def ComputeIPolicyInstanceViolation(ipolicy, instance, cfg,
   cpu_count = be_full[constants.BE_VCPUS]
   inst_nodes = cfg.GetInstanceNodes(instance)
   es_flags = rpc.GetExclusiveStorageForNodes(cfg, inst_nodes)
+  disks = cfg.GetInstanceDisks(instance)
   if any(es_flags.values()):
     # With exclusive storage use the actual spindles
     try:
-      spindle_use = sum([disk.spindles for disk in instance.disks])
+      spindle_use = sum([disk.spindles for disk in disks])
     except TypeError:
       ret.append("Number of spindles not configured for disks of instance %s"
                  " while exclusive storage is enabled, try running gnt-cluster"
@@ -628,8 +629,8 @@ def ComputeIPolicyInstanceViolation(ipolicy, instance, cfg,
       spindle_use = None
   else:
     spindle_use = be_full[constants.BE_SPINDLE_USE]
-  disk_count = len(instance.disks)
-  disk_sizes = [disk.size for disk in instance.disks]
+  disk_count = len(disks)
+  disk_sizes = [disk.size for disk in disks]
   nic_count = len(instance.nics)
   disk_template = instance.disk_template
 
@@ -1114,8 +1115,9 @@ def CheckIAllocatorOrNode(lu, iallocator_slot, node_slot):
 def FindFaultyInstanceDisks(cfg, rpc_runner, instance, node_uuid, prereq):
   faulty = []
 
+  disks = cfg.GetInstanceDisks(instance)
   result = rpc_runner.call_blockdev_getmirrorstatus(
-             node_uuid, (instance.disks, instance))
+             node_uuid, (disks, instance))
   result.Raise("Failed to get disk status from node %s" %
                cfg.GetNodeName(node_uuid),
                prereq=prereq, ecode=errors.ECODE_ENVIRON)
diff --git a/lib/cmdlib/group.py b/lib/cmdlib/group.py
index 5720dad..ec4bb54 100644
--- a/lib/cmdlib/group.py
+++ b/lib/cmdlib/group.py
@@ -915,7 +915,7 @@ class LUGroupVerifyDisks(NoHooksLU):
         node_to_inst.setdefault(node_uuid, []).append(inst)
 
     for (node_uuid, insts) in node_to_inst.items():
-      node_disks = [(inst.disks, inst) for inst in insts]
+      node_disks = [(self.cfg.GetInstanceDisks(inst), inst) for inst in insts]
       node_res = self.rpc.call_drbd_needs_activation(node_uuid, node_disks)
       msg = node_res.fail_msg
       if msg:
@@ -926,7 +926,8 @@ class LUGroupVerifyDisks(NoHooksLU):
 
       faulty_disk_uuids = set(node_res.payload)
       for inst in self.instances.values():
-        inst_disk_uuids = set([disk.uuid for disk in inst.disks])
+        disks = self.cfg.GetInstanceDisks(inst)
+        inst_disk_uuids = set([disk.uuid for disk in disks])
         if inst_disk_uuids.intersection(faulty_disk_uuids):
           offline_disk_instance_names.add(inst.name)
 
diff --git a/lib/cmdlib/instance.py b/lib/cmdlib/instance.py
index fd60c08..77957d2 100644
--- a/lib/cmdlib/instance.py
+++ b/lib/cmdlib/instance.py
@@ -1365,6 +1365,8 @@ class LUInstanceCreate(LogicalUnit):
 
     if disk_abort:
       RemoveDisks(self, instance)
+      for disk_uuid in list(instance.disks):
+        self.cfg.RemoveInstDisk(instance, disk_uuid)
       self.cfg.RemoveInstance(instance.uuid)
       # Make sure the instance lock gets removed
       self.remove_locks[locking.LEVEL_INSTANCE] = instance.name
@@ -1411,7 +1413,7 @@ class LUInstanceCreate(LogicalUnit):
                             uuid=instance_uuid,
                             os=os_type,
                             primary_node=self.pnode.uuid,
-                            nics=self.nics, disks=disks,
+                            nics=self.nics, disks=[],
                             disk_template=self.op.disk_template,
                             disks_active=False,
                             admin_state=constants.ADMINST_DOWN,
@@ -1442,16 +1444,25 @@ class LUInstanceCreate(LogicalUnit):
     else:
       feedback_fn("* creating instance disks...")
       try:
-        CreateDisks(self, iobj)
+        CreateDisks(self, iobj, disks=disks)
       except errors.OpExecError:
         self.LogWarning("Device creation failed")
         self.cfg.ReleaseDRBDMinors(self.op.instance_name)
         raise
 
     feedback_fn("adding instance %s to cluster config" % self.op.instance_name)
-
     self.cfg.AddInstance(iobj, self.proc.GetECId())
 
+    # re-read the instance from the configuration
+    iobj = self.cfg.GetInstanceInfo(iobj.uuid)
+
+    feedback_fn("adding disks to cluster config")
+    for disk in disks:
+      self.cfg.AddInstDisk(iobj, disk)
+
+    # re-read the instance from the configuration
+    iobj = self.cfg.GetInstanceInfo(iobj.uuid)
+
     # Declare that we don't want to remove the instance lock anymore, as we've
     # added the instance to the config
     del self.remove_locks[locking.LEVEL_INSTANCE]
@@ -1498,6 +1509,7 @@ class LUInstanceCreate(LogicalUnit):
     ReleaseLocks(self, locking.LEVEL_NODE_RES)
 
     if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
+      disks = self.cfg.GetInstanceDisks(iobj)
       if self.op.mode == constants.INSTANCE_CREATE:
         os_image = objects.GetOSImage(self.op.osparams)
 
@@ -1507,8 +1519,8 @@ class LUInstanceCreate(LogicalUnit):
           if pause_sync:
             feedback_fn("* pausing disk sync to install instance OS")
             result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
-                                                              (iobj.disks,
-                                                               iobj), True)
+                                                              (disks, iobj),
+                                                              True)
             for idx, success in enumerate(result.payload):
               if not success:
                 logging.warn("pause-sync of instance %s for disk %d failed",
@@ -1524,8 +1536,8 @@ class LUInstanceCreate(LogicalUnit):
           if pause_sync:
             feedback_fn("* resuming disk sync")
             result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
-                                                              (iobj.disks,
-                                                               iobj), False)
+                                                              (disks, iobj),
+                                                              False)
             for idx, success in enumerate(result.payload):
               if not success:
                 logging.warn("resume-sync of instance %s for disk %d failed",
@@ -1547,10 +1559,10 @@ class LUInstanceCreate(LogicalUnit):
 
             if iobj.os:
               dst_io = constants.IEIO_SCRIPT
-              dst_ioargs = ((iobj.disks[idx], iobj), idx)
+              dst_ioargs = ((disks[idx], iobj), idx)
             else:
               dst_io = constants.IEIO_RAW_DISK
-              dst_ioargs = (iobj.disks[idx], iobj)
+              dst_ioargs = (disks[idx], iobj)
 
             # FIXME: pass debug option from opcode to backend
             dt = masterd.instance.DiskTransfer("disk/%s" % idx,
@@ -1717,8 +1729,8 @@ class LUInstanceRename(LogicalUnit):
     if (self.instance.disk_template in (constants.DT_FILE,
                                         constants.DT_SHARED_FILE) and
         self.op.new_name != self.instance.name):
-      old_file_storage_dir = os.path.dirname(
-                               self.instance.disks[0].logical_id[1])
+      disks = self.cfg.GetInstanceDisks(self.instance)
+      old_file_storage_dir = os.path.dirname(disks[0].logical_id[1])
       rename_file_storage = True
 
     self.cfg.RenameInstance(self.instance.uuid, self.op.new_name)
@@ -1729,10 +1741,10 @@ class LUInstanceRename(LogicalUnit):
 
     # re-read the instance from the configuration after rename
     renamed_inst = self.cfg.GetInstanceInfo(self.instance.uuid)
+    disks = self.cfg.GetInstanceDisks(renamed_inst)
 
     if rename_file_storage:
-      new_file_storage_dir = os.path.dirname(
-                               renamed_inst.disks[0].logical_id[1])
+      new_file_storage_dir = os.path.dirname(disks[0].logical_id[1])
       result = self.rpc.call_file_storage_dir_rename(renamed_inst.primary_node,
                                                      old_file_storage_dir,
                                                      new_file_storage_dir)
@@ -1746,7 +1758,7 @@ class LUInstanceRename(LogicalUnit):
 
     # update info on disks
     info = GetInstanceInfoText(renamed_inst)
-    for (idx, disk) in enumerate(renamed_inst.disks):
+    for (idx, disk) in enumerate(disks):
       for node_uuid in self.cfg.GetInstanceNodes(renamed_inst):
         result = self.rpc.call_blockdev_setinfo(node_uuid,
                                                 (disk, renamed_inst), info)
@@ -1819,7 +1831,7 @@ class LUInstanceRemove(LogicalUnit):
     assert self.instance is not None, \
       "Cannot retrieve locked instance %s" % self.op.instance_name
     self.secondary_nodes = self.cfg.GetInstanceSecondaryNodes(self.instance)
-    self.inst_disks = self.instance.disks
+    self.inst_disks = self.cfg.GetInstanceDisks(self.instance)
 
   def Exec(self, feedback_fn):
     """Remove the instance.
@@ -1925,7 +1937,8 @@ class LUInstanceMove(LogicalUnit):
     cluster = self.cfg.GetClusterInfo()
     bep = cluster.FillBE(self.instance)
 
-    for idx, dsk in enumerate(self.instance.disks):
+    disks = self.cfg.GetInstanceDisks(self.instance)
+    for idx, dsk in enumerate(disks):
       if dsk.dev_type not in (constants.DT_PLAIN, constants.DT_FILE,
                               constants.DT_SHARED_FILE, constants.DT_GLUSTER):
         raise errors.OpPrereqError("Instance disk %d has a complex layout,"
@@ -1992,7 +2005,8 @@ class LUInstanceMove(LogicalUnit):
     errs = []
     transfers = []
     # activate, get path, create transfer jobs
-    for idx, disk in enumerate(self.instance.disks):
+    disks = self.cfg.GetInstanceDisks(self.instance)
+    for idx, disk in enumerate(disks):
       # FIXME: pass debug option from opcode to backend
       dt = masterd.instance.DiskTransfer("disk/%s" % idx,
                                          constants.IEIO_RAW_DISK,
@@ -2001,6 +2015,7 @@ class LUInstanceMove(LogicalUnit):
                                          (disk, self.instance),
                                          None)
       transfers.append(dt)
+      self.cfg.Update(disk, feedback_fn)
 
     import_result = \
       masterd.instance.TransferInstanceData(self, feedback_fn,
@@ -2417,16 +2432,6 @@ def _ApplyContainerMods(kind, container, chgdesc, mods,
       chgdesc.extend(changes)
 
 
-def _UpdateIvNames(base_index, disks):
-  """Updates the C{iv_name} attribute of disks.
-
-  @type disks: list of L{objects.Disk}
-
-  """
-  for (idx, disk) in enumerate(disks):
-    disk.iv_name = "disk/%s" % (base_index + idx, )
-
-
 class LUInstanceSetParams(LogicalUnit):
   """Modifies an instances's parameters.
 
@@ -2857,7 +2862,7 @@ class LUInstanceSetParams(LogicalUnit):
       assert self.instance.disk_template == constants.DT_PLAIN
       disks = [{constants.IDISK_SIZE: d.size,
                 constants.IDISK_VG: d.logical_id[0]}
-               for d in self.instance.disks]
+               for d in self.cfg.GetInstanceDisks(self.instance)]
       required = ComputeDiskSizePerVG(self.op.disk_template, disks)
       CheckNodesFreeDiskPerVG(self, [self.op.remote_node_uuid], required)
 
@@ -2954,7 +2959,8 @@ class LUInstanceSetParams(LogicalUnit):
       disk.name = params.get(constants.IDISK_NAME, None)
 
     # Verify disk changes (operating on a copy)
-    disks = copy.deepcopy(self.instance.disks)
+    inst_disks = self.cfg.GetInstanceDisks(self.instance)
+    disks = copy.deepcopy(inst_disks)
     _ApplyContainerMods("disk", disks, None, self.diskmod, None,
                         _PrepareDiskMod, None)
     utils.ValidateDeviceNames("disk", disks)
@@ -2962,7 +2968,7 @@ class LUInstanceSetParams(LogicalUnit):
       raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
                                  " more" % constants.MAX_DISKS,
                                  errors.ECODE_STATE)
-    disk_sizes = [disk.size for disk in self.instance.disks]
+    disk_sizes = [disk.size for disk in inst_disks]
     disk_sizes.extend(params["size"] for (op, idx, params, private) in
                       self.diskmod if op == constants.DDM_ADD)
     ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
@@ -3386,11 +3392,12 @@ class LUInstanceSetParams(LogicalUnit):
 
     assert self.instance.disk_template == constants.DT_PLAIN
 
+    old_disks = self.cfg.GetInstanceDisks(self.instance)
     # create a fake disk info for _GenerateDiskTemplate
     disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
                   constants.IDISK_VG: d.logical_id[0],
                   constants.IDISK_NAME: d.name}
-                 for d in self.instance.disks]
+                 for d in old_disks]
     new_disks = GenerateDiskTemplate(self, self.op.disk_template,
                                      self.instance.uuid, pnode_uuid,
                                      [snode_uuid], disk_info, None, None, 0,
@@ -3412,7 +3419,7 @@ class LUInstanceSetParams(LogicalUnit):
     # old ones
     feedback_fn("Renaming original volumes...")
     rename_list = [(o, n.children[0].logical_id)
-                   for (o, n) in zip(self.instance.disks, new_disks)]
+                   for (o, n) in zip(old_disks, new_disks)]
     result = self.rpc.call_blockdev_rename(pnode_uuid, rename_list)
     result.Raise("Failed to rename original LVs")
 
@@ -3429,16 +3436,26 @@ class LUInstanceSetParams(LogicalUnit):
       feedback_fn("Initializing of DRBD devices failed;"
                   " renaming back original volumes...")
       rename_back_list = [(n.children[0], o.logical_id)
-                          for (n, o) in zip(new_disks, self.instance.disks)]
+                          for (n, o) in zip(new_disks, old_disks)]
       result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list)
       result.Raise("Failed to rename LVs back after error %s" % str(e))
       raise
 
-    # at this point, the instance has been modified
+    # Remove the old disks from the instance
+    for old_disk in old_disks:
+      self.cfg.RemoveInstDisk(self.instance, old_disk.uuid)
+
+    # Update instance structure
     self.instance.disk_template = constants.DT_DRBD8
-    self.instance.disks = new_disks
     self.cfg.Update(self.instance, feedback_fn)
 
+    # Attach the new disks to the instance
+    for (idx, new_disk) in enumerate(new_disks):
+      self.cfg.AddInstDisk(self.instance, new_disk, idx=idx)
+
+    # re-read the instance from the configuration
+    self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
+
     # Release node locks while waiting for sync
     ReleaseLocks(self, locking.LEVEL_NODE)
 
@@ -3463,8 +3480,9 @@ class LUInstanceSetParams(LogicalUnit):
     snode_uuid = secondary_nodes[0]
     feedback_fn("Converting template to plain")
 
-    old_disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
-    new_disks = [d.children[0] for d in self.instance.disks]
+    disks = self.cfg.GetInstanceDisks(self.instance)
+    old_disks = AnnotateDiskParams(self.instance, disks, self.cfg)
+    new_disks = [d.children[0] for d in disks]
 
     # copy over size, mode and name
     for parent, child in zip(old_disks, new_disks):
@@ -3478,12 +3496,21 @@ class LUInstanceSetParams(LogicalUnit):
       tcp_port = disk.logical_id[2]
       self.cfg.AddTcpUdpPort(tcp_port)
 
-    # update instance structure
-    self.instance.disks = new_disks
+    # Remove the old disks from the instance
+    for old_disk in old_disks:
+      self.cfg.RemoveInstDisk(self.instance, old_disk.uuid)
+
+    # Update instance structure
     self.instance.disk_template = constants.DT_PLAIN
-    _UpdateIvNames(0, self.instance.disks)
     self.cfg.Update(self.instance, feedback_fn)
 
+    # Attach the new disks to the instance
+    for (idx, new_disk) in enumerate(new_disks):
+      self.cfg.AddInstDisk(self.instance, new_disk, idx=idx)
+
+    # re-read the instance from the configuration
+    self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
+
     # Release locks in case removing disks takes a while
     ReleaseLocks(self, locking.LEVEL_NODE)
 
@@ -3526,7 +3553,8 @@ class LUInstanceSetParams(LogicalUnit):
     """
     # add a new disk
     if self.instance.disk_template in constants.DTS_FILEBASED:
-      (file_driver, file_path) = self.instance.disks[0].logical_id
+      disks = self.cfg.GetInstanceDisks(self.instance)
+      (file_driver, file_path) = disks[0].logical_id
       file_path = os.path.dirname(file_path)
     else:
       file_driver = file_path = None
@@ -3539,6 +3567,10 @@ class LUInstanceSetParams(LogicalUnit):
                            file_driver, idx, self.Log, self.diskparams)[0]
 
     new_disks = CreateDisks(self, self.instance, disks=[disk])
+    self.cfg.AddInstDisk(self.instance, disk, idx)
+
+    # re-read the instance from the configuration
+    self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
 
     if self.cluster.prealloc_wipe_disks:
       # Wipe new disk
@@ -3632,6 +3664,12 @@ class LUInstanceSetParams(LogicalUnit):
     if root.dev_type in constants.DTS_DRBD:
       self.cfg.AddTcpUdpPort(root.logical_id[2])
 
+    # Remove disk from config
+    self.cfg.RemoveInstDisk(self.instance, root.uuid)
+
+    # re-read the instance from the configuration
+    self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
+
     return hotmsg
 
   def _CreateNewNic(self, idx, params, private):
@@ -3732,10 +3770,10 @@ class LUInstanceSetParams(LogicalUnit):
       result.append(("runtime_memory", self.op.runtime_mem))
 
     # Apply disk changes
-    _ApplyContainerMods("disk", self.instance.disks, result, self.diskmod,
+    inst_disks = self.cfg.GetInstanceDisks(self.instance)
+    _ApplyContainerMods("disk", inst_disks, result, self.diskmod,
                         self._CreateNewDisk, self._ModifyDisk,
                         self._RemoveDisk, post_add_fn=self._PostAddDisk)
-    _UpdateIvNames(0, self.instance.disks)
 
     if self.op.disk_template:
       if __debug__:
diff --git a/lib/cmdlib/instance_migration.py b/lib/cmdlib/instance_migration.py
index 8e5ce91..9840787 100644
--- a/lib/cmdlib/instance_migration.py
+++ b/lib/cmdlib/instance_migration.py
@@ -483,11 +483,11 @@ class TLMigrateInstance(Tasklet):
     """
     self.feedback_fn("* wait until resync is done")
     all_done = False
+    disks = self.cfg.GetInstanceDisks(self.instance)
     while not all_done:
       all_done = True
       result = self.rpc.call_drbd_wait_sync(self.all_node_uuids,
-                                            (self.instance.disks,
-                                             self.instance))
+                                            (disks, self.instance))
       min_percent = 100
       for node_uuid, nres in result.items():
         nres.Raise("Cannot resync disks on node %s" %
@@ -508,8 +508,9 @@ class TLMigrateInstance(Tasklet):
     self.feedback_fn("* switching node %s to secondary mode" %
                      self.cfg.GetNodeName(node_uuid))
 
+    disks = self.cfg.GetInstanceDisks(self.instance)
     result = self.rpc.call_blockdev_close(node_uuid, self.instance.name,
-                                          (self.instance.disks, self.instance))
+                                          (disks, self.instance))
     result.Raise("Cannot change disk to secondary on node %s" %
                  self.cfg.GetNodeName(node_uuid))
 
@@ -518,8 +519,9 @@ class TLMigrateInstance(Tasklet):
 
     """
     self.feedback_fn("* changing into standalone mode")
+    disks = self.cfg.GetInstanceDisks(self.instance)
     result = self.rpc.call_drbd_disconnect_net(
-               self.all_node_uuids, (self.instance.disks, self.instance))
+               self.all_node_uuids, (disks, self.instance))
     for node_uuid, nres in result.items():
       nres.Raise("Cannot disconnect disks node %s" %
                  self.cfg.GetNodeName(node_uuid))
@@ -533,8 +535,9 @@ class TLMigrateInstance(Tasklet):
     else:
       msg = "single-master"
     self.feedback_fn("* changing disks into %s mode" % msg)
+    disks = self.cfg.GetInstanceDisks(self.instance)
     result = self.rpc.call_drbd_attach_net(self.all_node_uuids,
-                                           (self.instance.disks, self.instance),
+                                           (disks, self.instance),
                                            self.instance.name, multimaster)
     for node_uuid, nres in result.items():
       nres.Raise("Cannot change disks config on node %s" %
@@ -680,7 +683,7 @@ class TLMigrateInstance(Tasklet):
                          (src_version, dst_version))
 
     self.feedback_fn("* checking disk consistency between source and target")
-    for (idx, dev) in enumerate(self.instance.disks):
+    for (idx, dev) in enumerate(self.cfg.GetInstanceDisks(self.instance)):
       if not CheckDiskConsistency(self.lu, self.instance, dev,
                                   self.target_node_uuid,
                                   False):
@@ -815,7 +818,8 @@ class TLMigrateInstance(Tasklet):
     # If the instance's disk template is `rbd' or `ext' and there was a
     # successful migration, unmap the device from the source node.
     if self.instance.disk_template in (constants.DT_RBD, constants.DT_EXT):
-      disks = ExpandCheckDisks(self.instance, self.instance.disks)
+      inst_disks = self.cfg.GetInstanceDisks(self.instance)
+      disks = ExpandCheckDisks(inst_disks, inst_disks)
       self.feedback_fn("* unmapping instance's disks from %s" %
                        self.cfg.GetNodeName(self.source_node_uuid))
       for disk in disks:
@@ -846,7 +850,7 @@ class TLMigrateInstance(Tasklet):
 
     if self.instance.disks_active:
       self.feedback_fn("* checking disk consistency between source and target")
-      for (idx, dev) in enumerate(self.instance.disks):
+      for (idx, dev) in enumerate(self.cfg.GetInstanceDisks(self.instance)):
         # for drbd, these are drbd over lvm
         if not CheckDiskConsistency(self.lu, self.instance, dev,
                                     self.target_node_uuid, False):
diff --git a/lib/cmdlib/instance_query.py b/lib/cmdlib/instance_query.py
index 13d079f..c10c1ca 100644
--- a/lib/cmdlib/instance_query.py
+++ b/lib/cmdlib/instance_query.py
@@ -247,7 +247,7 @@ class LUInstanceQueryData(NoHooksLU):
 
       disks = map(compat.partial(self._ComputeDiskStatus, instance,
                                  node_uuid2name_fn),
-                  instance.disks)
+                  self.cfg.GetInstanceDisks(instance))
 
       secondary_nodes = self.cfg.GetInstanceSecondaryNodes(instance)
       snodes_group_uuids = [nodes[snode_uuid].group
diff --git a/lib/cmdlib/instance_storage.py b/lib/cmdlib/instance_storage.py
index f089c15..d38852b 100644
--- a/lib/cmdlib/instance_storage.py
+++ b/lib/cmdlib/instance_storage.py
@@ -224,6 +224,7 @@ def CreateDisks(lu, instance, to_skip=None, target_node_uuid=None, disks=None):
 
   """
   info = GetInstanceInfoText(instance)
+  inst_disks = lu.cfg.GetInstanceDisks(instance)
   if target_node_uuid is None:
     pnode_uuid = instance.primary_node
     all_node_uuids = lu.cfg.GetInstanceNodes(instance, disks=disks)
@@ -232,12 +233,15 @@ def CreateDisks(lu, instance, to_skip=None, target_node_uuid=None, disks=None):
     all_node_uuids = [pnode_uuid]
 
   if disks is None:
-    disks = instance.disks
+    disks = inst_disks
 
   CheckDiskTemplateEnabled(lu.cfg.GetClusterInfo(), instance.disk_template)
 
   if instance.disk_template in constants.DTS_FILEBASED:
-    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
+    if inst_disks:
+      file_storage_dir = os.path.dirname(inst_disks[0].logical_id[1])
+    else:
+      file_storage_dir = os.path.dirname(disks[0].logical_id[1])
     result = lu.rpc.call_file_storage_dir_create(pnode_uuid, file_storage_dir)
 
     result.Raise("Failed to create directory '%s' on"
@@ -594,7 +598,7 @@ class LUInstanceRecreateDisks(LogicalUnit):
       constants.IDISK_SIZE: d.size,
       constants.IDISK_MODE: d.mode,
       constants.IDISK_SPINDLES: d.spindles,
-      } for d in self.instance.disks]
+      } for d in self.cfg.GetInstanceDisks(self.instance)]
     req = iallocator.IAReqInstanceAlloc(name=self.op.instance_name,
                                         disk_template=disk_template,
                                         tags=list(self.instance.GetTags()),
@@ -806,7 +810,8 @@ class LUInstanceRecreateDisks(LogicalUnit):
     to_skip = []
     mods = [] # keeps track of needed changes
 
-    for idx, disk in enumerate(self.instance.disks):
+    inst_disks = self.cfg.GetInstanceDisks(self.instance)
+    for idx, disk in enumerate(inst_disks):
       try:
         changes = self.disks[idx]
       except KeyError:
@@ -834,7 +839,7 @@ class LUInstanceRecreateDisks(LogicalUnit):
     # now that we have passed all asserts above, we can apply the mods
     # in a single run (to avoid partial changes)
     for idx, new_id, changes in mods:
-      disk = self.instance.disks[idx]
+      disk = inst_disks[idx]
       if new_id is not None:
         assert disk.dev_type == constants.DT_DRBD8
         disk.logical_id = new_id
@@ -842,6 +847,7 @@ class LUInstanceRecreateDisks(LogicalUnit):
         disk.Update(size=changes.get(constants.IDISK_SIZE, None),
                     mode=changes.get(constants.IDISK_MODE, None),
                     spindles=changes.get(constants.IDISK_SPINDLES, None))
+      self.cfg.Update(disk, feedback_fn)
 
     # change primary node, if needed
     if self.op.node_uuids:
@@ -860,8 +866,9 @@ class LUInstanceRecreateDisks(LogicalUnit):
 
     # TODO: Release node locks before wiping, or explain why it's not possible
     if self.cfg.GetClusterInfo().prealloc_wipe_disks:
+      inst_disks = self.cfg.GetInstanceDisks(self.instance)
       wipedisks = [(idx, disk, 0)
-                   for (idx, disk) in enumerate(self.instance.disks)
+                   for (idx, disk) in enumerate(inst_disks)
                    if idx not in to_skip]
       WipeOrCleanupDisks(self, self.instance, disks=wipedisks,
                          cleanup=new_disks)
@@ -1018,8 +1025,9 @@ def WipeDisks(lu, instance, disks=None):
   node_name = lu.cfg.GetNodeName(node_uuid)
 
   if disks is None:
+    inst_disks = lu.cfg.GetInstanceDisks(instance)
     disks = [(idx, disk, 0)
-             for (idx, disk) in enumerate(instance.disks)]
+             for (idx, disk) in enumerate(inst_disks)]
 
   logging.info("Pausing synchronization of disks of instance '%s'",
                instance.name)
@@ -1112,10 +1120,11 @@ def ImageDisks(lu, instance, image, disks=None):
   node_uuid = instance.primary_node
   node_name = lu.cfg.GetNodeName(node_uuid)
 
+  inst_disks = lu.cfg.GetInstanceDisks(instance)
   if disks is None:
-    disks = [(0, instance.disks[0])]
+    disks = [(0, inst_disks[0])]
   else:
-    disks = map(lambda idx: (idx, instance.disks[idx]), disks)
+    disks = map(lambda idx: (idx, inst_disks[idx]), disks)
 
   logging.info("Pausing synchronization of disks of instance '%s'",
                instance.name)
@@ -1180,7 +1189,7 @@ def WipeOrCleanupDisks(lu, instance, disks=None, cleanup=None):
     raise
 
 
-def ExpandCheckDisks(instance, disks):
+def ExpandCheckDisks(instance_disks, disks):
   """Return the instance disks selected by the disks list
 
   @type disks: list of L{objects.Disk} or None
@@ -1190,12 +1199,14 @@ def ExpandCheckDisks(instance, disks):
 
   """
   if disks is None:
-    return instance.disks
+    return instance_disks
   else:
-    if not set(disks).issubset(instance.disks):
+    inst_disks_uuids = [d.uuid for d in instance_disks]
+    disks_uuids = [d.uuid for d in disks]
+    if not set(disks_uuids).issubset(inst_disks_uuids):
       raise errors.ProgrammerError("Can only act on disks belonging to the"
-                                   " target instance: expected a subset of %r,"
-                                   " got %r" % (instance.disks, disks))
+                                   " target instance: expected a subset of %s,"
+                                   " got %s" % (inst_disks_uuids, disks_uuids))
     return disks
 
 
@@ -1203,10 +1214,11 @@ def WaitForSync(lu, instance, disks=None, oneshot=False):
   """Sleep and poll for an instance's disk to sync.
 
   """
-  if not instance.disks or disks is not None and not disks:
+  inst_disks = lu.cfg.GetInstanceDisks(instance)
+  if not inst_disks or disks is not None and not disks:
     return True
 
-  disks = ExpandCheckDisks(instance, disks)
+  disks = ExpandCheckDisks(inst_disks, disks)
 
   if not oneshot:
     lu.LogInfo("Waiting for instance %s to sync disks", instance.name)
@@ -1291,7 +1303,8 @@ def ShutdownInstanceDisks(lu, instance, disks=None, ignore_primary=False):
   if disks is None:
     # only mark instance disks as inactive if all disks are affected
     lu.cfg.MarkInstanceDisksInactive(instance.uuid)
-  disks = ExpandCheckDisks(instance, disks)
+  inst_disks = lu.cfg.GetInstanceDisks(instance)
+  disks = ExpandCheckDisks(inst_disks, disks)
 
   for disk in disks:
     for node_uuid, top_disk in disk.ComputeNodeTree(instance.primary_node):
@@ -1351,7 +1364,8 @@ def AssembleInstanceDisks(lu, instance, disks=None, ignore_secondaries=False,
     # only mark instance disks as active if all disks are affected
     instance = lu.cfg.MarkInstanceDisksActive(instance.uuid)
 
-  disks = ExpandCheckDisks(instance, disks)
+  inst_disks = lu.cfg.GetInstanceDisks(instance)
+  disks = ExpandCheckDisks(inst_disks, disks)
 
   # With the two passes mechanism we try to reduce the window of
   # opportunity for the race condition of switching DRBD to primary
@@ -1496,7 +1510,7 @@ class LUInstanceGrowDisk(LogicalUnit):
       raise errors.OpPrereqError("Instance's disk layout does not support"
                                  " growing", errors.ECODE_INVAL)
 
-    self.disk = self.instance.FindDisk(self.op.disk)
+    self.disk = self.cfg.GetDiskInfo(self.instance.FindDisk(self.op.disk))
 
     if self.op.absolute:
       self.target = self.op.amount
@@ -1610,7 +1624,8 @@ class LUInstanceGrowDisk(LogicalUnit):
     assert wipe_disks ^ (old_disk_size is None)
 
     if wipe_disks:
-      assert self.instance.disks[self.op.disk] == self.disk
+      inst_disks = self.cfg.GetInstanceDisks(self.instance)
+      assert inst_disks[self.op.disk] == self.disk
 
       # Wipe newly added disk space
       WipeDisks(self, self.instance,
@@ -1999,7 +2014,7 @@ class TLReplaceDisks(Tasklet):
     """
     node_uuids = self.cfg.GetInstanceNodes(instance)
 
-    for idx, dev in enumerate(instance.disks):
+    for idx, dev in enumerate(self.cfg.GetInstanceDisks(instance)):
       for node_uuid in node_uuids:
         self.lu.LogInfo("Checking disk/%d on %s", idx,
                         self.cfg.GetNodeName(node_uuid))
@@ -2027,7 +2042,7 @@ class TLReplaceDisks(Tasklet):
       raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
                                  " instances", errors.ECODE_INVAL)
 
-    secondary_nodes = self.cfg.GetInstanceSeconaryNodes(self.instance)
+    secondary_nodes = self.cfg.GetInstanceSecondaryNodes(self.instance)
     if len(secondary_nodes) != 1:
       raise errors.OpPrereqError("The instance has a strange layout,"
                                  " expected one secondary but found %d" %
@@ -2252,7 +2267,7 @@ class TLReplaceDisks(Tasklet):
 
   def _CheckDisksExistence(self, node_uuids):
     # Check disk existence
-    for idx, dev in enumerate(self.instance.disks):
+    for idx, dev in enumerate(self.cfg.GetInstanceDisks(self.instance)):
       if idx not in self.disks:
         continue
 
@@ -2277,7 +2292,7 @@ class TLReplaceDisks(Tasklet):
                                     extra_hint))
 
   def _CheckDisksConsistency(self, node_uuid, on_primary, ldisk):
-    for idx, dev in enumerate(self.instance.disks):
+    for idx, dev in enumerate(self.cfg.GetInstanceDisks(self.instance)):
       if idx not in self.disks:
         continue
 
@@ -2300,7 +2315,8 @@ class TLReplaceDisks(Tasklet):
     """
     iv_names = {}
 
-    disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
+    inst_disks = self.cfg.GetInstanceDisks(self.instance)
+    disks = AnnotateDiskParams(self.instance, inst_disks, self.cfg)
     for idx, dev in enumerate(disks):
       if idx not in self.disks:
         continue
@@ -2543,7 +2559,8 @@ class TLReplaceDisks(Tasklet):
 
     # Step: create new storage
     self.lu.LogStep(3, steps_total, "Allocate new storage")
-    disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
+    inst_disks = self.cfg.GetInstanceDisks(self.instance)
+    disks = AnnotateDiskParams(self.instance, inst_disks, self.cfg)
     excl_stor = IsExclusiveStorageEnabledNodeUuid(self.lu.cfg,
                                                   self.new_node_uuid)
     for idx, dev in enumerate(disks):
@@ -2563,12 +2580,12 @@ class TLReplaceDisks(Tasklet):
     # error and the success paths
     self.lu.LogStep(4, steps_total, "Changing drbd configuration")
     minors = self.cfg.AllocateDRBDMinor([self.new_node_uuid
-                                         for _ in self.instance.disks],
+                                         for _ in inst_disks],
                                         self.instance.uuid)
     logging.debug("Allocated minors %r", minors)
 
     iv_names = {}
-    for idx, (dev, new_minor) in enumerate(zip(self.instance.disks, minors)):
+    for idx, (dev, new_minor) in enumerate(zip(inst_disks, minors)):
       self.lu.LogInfo("activating a new drbd on %s for disk/%d" %
                       (self.cfg.GetNodeName(self.new_node_uuid), idx))
       # create new devices on new_node; note that we create two IDs:
@@ -2607,7 +2624,7 @@ class TLReplaceDisks(Tasklet):
         raise
 
     # We have new devices, shutdown the drbd on the old secondary
-    for idx, dev in enumerate(self.instance.disks):
+    for idx, dev in enumerate(inst_disks):
       self.lu.LogInfo("Shutting down drbd for disk/%d on old node", idx)
       msg = self.rpc.call_blockdev_shutdown(self.target_node_uuid,
                                             (dev, self.instance)).fail_msg
@@ -2619,7 +2636,7 @@ class TLReplaceDisks(Tasklet):
 
     self.lu.LogInfo("Detaching primary drbds from the network (=> standalone)")
     result = self.rpc.call_drbd_disconnect_net(
-               [pnode], (self.instance.disks, self.instance))[pnode]
+               [pnode], (inst_disks, self.instance))[pnode]
 
     msg = result.fail_msg
     if msg:
@@ -2633,6 +2650,7 @@ class TLReplaceDisks(Tasklet):
     self.lu.LogInfo("Updating instance configuration")
     for dev, _, new_logical_id in iv_names.itervalues():
       dev.logical_id = new_logical_id
+      self.cfg.Update(dev, feedback_fn)
 
     self.cfg.Update(self.instance, feedback_fn)
 
@@ -2642,9 +2660,10 @@ class TLReplaceDisks(Tasklet):
     # and now perform the drbd attach
     self.lu.LogInfo("Attaching primary drbds to new secondary"
                     " (standalone => connected)")
+    inst_disks = self.cfg.GetInstanceDisks(self.instance)
     result = self.rpc.call_drbd_attach_net([self.instance.primary_node,
                                             self.new_node_uuid],
-                                           (self.instance.disks, self.instance),
+                                           (inst_disks, self.instance),
                                            self.instance.name,
                                            False)
     for to_node, to_result in result.items():
@@ -2768,7 +2787,8 @@ class TemporaryDisk():
     self._feedback_fn("Attempting to create temporary disk")
 
     self._undoing_info = CreateDisks(self._lu, self._instance, disks=[new_disk])
-    self._instance.disks.insert(0, new_disk)
+    self._lu.cfg.AddInstDisk(self._instance, new_disk, idx=0)
+    self._instance = self._lu.cfg.GetInstanceInfo(self._instance.uuid)
 
     self._feedback_fn("Temporary disk created")
 
@@ -2787,7 +2807,8 @@ class TemporaryDisk():
       self._EnsureInstanceDiskState()
 
       _UndoCreateDisks(self._lu, self._undoing_info, self._instance)
-      self._instance.disks.pop(0)
+      self._lu.cfg.RemoveInstDisk(self._instance, self._instance.disks[0])
+      self._instance = self._lu.cfg.GetInstanceInfo(self._instance.uuid)
 
       self._feedback_fn("Temporary disk removed")
     except:
diff --git a/lib/cmdlib/instance_utils.py b/lib/cmdlib/instance_utils.py
index 4d73421..0dd952a 100644
--- a/lib/cmdlib/instance_utils.py
+++ b/lib/cmdlib/instance_utils.py
@@ -173,7 +173,7 @@ def BuildInstanceHookEnvByObject(lu, instance, secondary_nodes=None,
 
   # Override disks
   if disks is None:
-    disks = instance.disks
+    disks = lu.cfg.GetInstanceDisks(instance)
 
   args = {
     "name": instance.name,
@@ -244,8 +244,12 @@ def RemoveInstance(lu, feedback_fn, instance, ignore_failures):
       raise errors.OpExecError("Can't remove instance's disks")
     feedback_fn("Warning: can't remove instance's disks")
 
-  logging.info("Removing instance %s out of cluster config", instance.name)
+  logging.info("Removing instance's disks")
+  # Copy the 'instance.disks' list, because it changes inside 'RemoveInstDisk'
+  for disk in list(instance.disks):
+    lu.cfg.RemoveInstDisk(instance, disk)
 
+  logging.info("Removing instance %s out of cluster config", instance.name)
   lu.cfg.RemoveInstance(instance.uuid)
 
   assert not lu.remove_locks.get(locking.LEVEL_INSTANCE), \
@@ -277,7 +281,8 @@ def RemoveDisks(lu, instance, target_node_uuid=None, ignore_failures=False):
 
   all_result = True
   ports_to_release = set()
-  anno_disks = AnnotateDiskParams(instance, instance.disks, lu.cfg)
+  inst_disks = lu.cfg.GetInstanceDisks(instance)
+  anno_disks = AnnotateDiskParams(instance, inst_disks, lu.cfg)
   for (idx, device) in enumerate(anno_disks):
     if target_node_uuid:
       edata = [(target_node_uuid, device)]
@@ -303,8 +308,8 @@ def RemoveDisks(lu, instance, target_node_uuid=None, ignore_failures=False):
   CheckDiskTemplateEnabled(lu.cfg.GetClusterInfo(), instance.disk_template)
 
   if instance.disk_template in [constants.DT_FILE, constants.DT_SHARED_FILE]:
-    if len(instance.disks) > 0:
-      file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
+    if len(inst_disks) > 0:
+      file_storage_dir = os.path.dirname(inst_disks[0].logical_id[1])
     else:
       if instance.disk_template == constants.DT_SHARED_FILE:
         file_storage_dir = utils.PathJoin(lu.cfg.GetSharedFileStorageDir(),
diff --git a/lib/config.py b/lib/config.py
index 872ae33..28addc8 100644
--- a/lib/config.py
+++ b/lib/config.py
@@ -553,7 +553,6 @@ class ConfigWriter(object):
     """
     return self._UnlockedGetDiskInfo(disk_uuid)
 
-  # pylint: disable=R0201
   def _UnlockedGetInstanceNodes(self, instance, disks=None):
     """Get all disk-related nodes for an instance.
 
@@ -561,7 +560,7 @@ class ConfigWriter(object):
 
     """
     all_nodes = []
-    inst_disks = instance.disks
+    inst_disks = self._UnlockedGetInstanceDisks(instance)
     if disks is not None:
       inst_disks.extend(disks)
     for disk in inst_disks:
@@ -608,7 +607,6 @@ class ConfigWriter(object):
     """
     return self._UnlockedGetInstanceSecondaryNodes(instance)
 
-  # pylint: disable=R0201
   def _UnlockedGetInstanceLVsByNode(self, instance, lvmap=None):
     """Provide a mapping of node to LVs a given instance owns.
 
@@ -639,7 +637,7 @@ class ConfigWriter(object):
       ret = None
 
     node_uuid = instance.primary_node
-    devs = instance.disks
+    devs = self._UnlockedGetInstanceDisks(instance)
     _MapLVsByNode(lvmap, devs, node_uuid)
     return ret
 
@@ -864,26 +862,6 @@ class ConfigWriter(object):
         lvnames.update(lv_list)
     return lvnames
 
-  def _AllDisks(self):
-    """Compute the list of all Disks (recursively, including children).
-
-    """
-    def DiskAndAllChildren(disk):
-      """Returns a list containing the given disk and all of his children.
-
-      """
-      disks = [disk]
-      if disk.children:
-        for child_disk in disk.children:
-          disks.extend(DiskAndAllChildren(child_disk))
-      return disks
-
-    disks = []
-    for instance in self._ConfigData().instances.values():
-      for disk in instance.disks:
-        disks.extend(DiskAndAllChildren(disk))
-    return disks
-
   def _AllNICs(self):
     """Compute the list of all NICs.
 
@@ -966,33 +944,9 @@ class ConfigWriter(object):
           helper(child, result)
 
     result = []
-    for instance in self._ConfigData().instances.values():
-      for disk in instance.disks:
-        helper(disk, result)
-
-    return result
-
-  def _CheckDiskIDs(self, disk, l_ids):
-    """Compute duplicate disk IDs
-
-    @type disk: L{objects.Disk}
-    @param disk: the disk at which to start searching
-    @type l_ids: list
-    @param l_ids: list of current logical ids
-    @rtype: list
-    @return: a list of error messages
-
-    """
-    result = []
-    if disk.logical_id is not None:
-      if disk.logical_id in l_ids:
-        result.append("duplicate logical id %s" % str(disk.logical_id))
-      else:
-        l_ids.append(disk.logical_id)
+    for disk in self._ConfigData().disks.values():
+      helper(disk, result)
 
-    if disk.children:
-      for child in disk.children:
-        result.extend(self._CheckDiskIDs(child, l_ids))
     return result
 
   def _UnlockedVerifyConfig(self):
@@ -1009,7 +963,6 @@ class ConfigWriter(object):
     ports = {}
     data = self._ConfigData()
     cluster = data.cluster
-    seen_lids = []
 
     # global cluster checks
     if not cluster.enabled_hypervisors:
@@ -1110,6 +1063,20 @@ class ConfigWriter(object):
           )
         )
 
+    # per-disk checks
+    inst_disk_uuids = [d for inst in data.instances.values()
+                       for d in inst.disks]
+    for disk_uuid in data.disks:
+      disk = data.disks[disk_uuid]
+      result.extend(["disk %s error: %s" % (disk.uuid, msg)
+                     for msg in disk.Verify()])
+      if disk.uuid != disk_uuid:
+        result.append("disk '%s' is indexed by wrong UUID '%s'" %
+                      (disk.name, disk_uuid))
+      if disk.uuid not in inst_disk_uuids:
+        result.append("disk '%s' is not attached to any instance" %
+                      disk.uuid)
+
     # per-instance checks
     for instance_uuid in data.instances:
       instance = data.instances[instance_uuid]
@@ -1146,8 +1113,15 @@ class ConfigWriter(object):
         _helper("instance %s" % instance.name, "beparams",
                 cluster.FillBE(instance), constants.BES_PARAMETER_TYPES)
 
+      # check that the instance's disks exist
+      for disk_uuid in instance.disks:
+        if disk_uuid not in data.disks:
+          result.append("Instance '%s' has invalid disk '%s'" %
+                        (instance.name, disk_uuid))
+
+      inst_disks = self._UnlockedGetInstanceDisks(instance)
       # gather the drbd ports for duplicate checks
-      for (idx, dsk) in enumerate(instance.disks):
+      for (idx, dsk) in enumerate(inst_disks):
         if dsk.dev_type in constants.DTS_DRBD:
           tcp_port = dsk.logical_id[2]
           if tcp_port not in ports:
@@ -1160,13 +1134,7 @@ class ConfigWriter(object):
           ports[net_port] = []
         ports[net_port].append((instance.name, "network port"))
 
-      # instance disk verify
-      for idx, disk in enumerate(instance.disks):
-        result.extend(["instance '%s' disk %d error: %s" %
-                       (instance.name, idx, msg) for msg in disk.Verify()])
-        result.extend(self._CheckDiskIDs(disk, seen_lids))
-
-      wrong_names = _CheckInstanceDiskIvNames(instance.disks)
+      wrong_names = _CheckInstanceDiskIvNames(inst_disks)
       if wrong_names:
         tmp = "; ".join(("name of disk %s should be '%s', but is '%s'" %
                          (idx, exp_name, actual_name))
@@ -1383,7 +1351,7 @@ class ConfigWriter(object):
     duplicates = []
     my_dict = dict((node_uuid, {}) for node_uuid in self._ConfigData().nodes)
     for instance in self._ConfigData().instances.itervalues():
-      for disk in instance.disks:
+      for disk in self._UnlockedGetInstanceDisks(instance):
         duplicates.extend(_AppendUsedMinors(self._UnlockedGetNodeName,
                                             instance, disk, my_dict))
     for (node_uuid, minor), inst_uuid in self._temporary_drbds.iteritems():
@@ -2030,7 +1998,8 @@ class ConfigWriter(object):
     inst = self._ConfigData().instances[inst_uuid]
     inst.name = new_name
 
-    for (_, disk) in enumerate(inst.disks):
+    inst_disks = self._UnlockedGetInstanceDisks(inst)
+    for (_, disk) in enumerate(inst_disks):
       if disk.dev_type in [constants.DT_FILE, constants.DT_SHARED_FILE]:
         # rename the file paths in logical and physical id
         file_storage_dir = os.path.dirname(os.path.dirname(disk.logical_id[1]))
@@ -2800,7 +2769,7 @@ class ConfigWriter(object):
             self._ConfigData().nodes.values() +
             self._ConfigData().nodegroups.values() +
             self._ConfigData().networks.values() +
-            self._AllDisks() +
+            self._ConfigData().disks.values() +
             self._AllNICs() +
             [self._ConfigData().cluster])
 
diff --git a/lib/masterd/iallocator.py b/lib/masterd/iallocator.py
index 04a30a0..b91c51d 100644
--- a/lib/masterd/iallocator.py
+++ b/lib/masterd/iallocator.py
@@ -270,7 +270,8 @@ class IAReqRelocate(IARequestBase):
       raise errors.OpPrereqError("Instance has not exactly one secondary node",
                                  errors.ECODE_STATE)
 
-    disk_sizes = [{constants.IDISK_SIZE: disk.size} for disk in instance.disks]
+    inst_disks = cfg.GetInstanceDisks(instance)
+    disk_sizes = [{constants.IDISK_SIZE: disk.size} for disk in inst_disks]
     disk_space = gmi.ComputeDiskSize(instance.disk_template, disk_sizes)
 
     return {
@@ -741,6 +742,7 @@ class IAllocator(object):
         if filled_params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
           nic_dict["bridge"] = filled_params[constants.NIC_LINK]
         nic_data.append(nic_dict)
+      inst_disks = cfg.GetInstanceDisks(iinfo)
       pir = {
         "tags": list(iinfo.GetTags()),
         "admin_state": iinfo.admin_state,
@@ -755,7 +757,7 @@ class IAllocator(object):
         "disks": [{constants.IDISK_SIZE: dsk.size,
                    constants.IDISK_MODE: dsk.mode,
                    constants.IDISK_SPINDLES: dsk.spindles}
-                  for dsk in iinfo.disks],
+                  for dsk in inst_disks],
         "disk_template": iinfo.disk_template,
         "disks_active": iinfo.disks_active,
         "hypervisor": iinfo.hypervisor,
diff --git a/lib/masterd/instance.py b/lib/masterd/instance.py
index e9feb86..4d3b513 100644
--- a/lib/masterd/instance.py
+++ b/lib/masterd/instance.py
@@ -1166,7 +1166,8 @@ class ExportInstanceHelper:
     src_node = instance.primary_node
     src_node_name = self._lu.cfg.GetNodeName(src_node)
 
-    for idx, disk in enumerate(instance.disks):
+    inst_disks = self._lu.cfg.GetInstanceDisks(instance)
+    for idx, disk in enumerate(inst_disks):
       self._feedback_fn("Creating a snapshot of disk/%s on node %s" %
                         (idx, src_node_name))
 
@@ -1294,6 +1295,7 @@ class ExportInstanceHelper:
 
     """
     instance = self._instance
+    inst_disks = self._lu.cfg.GetInstanceDisks(instance)
 
     assert len(disk_info) == len(instance.disks)
 
@@ -1301,7 +1303,7 @@ class ExportInstanceHelper:
 
     ieloop = ImportExportLoop(self._lu)
     try:
-      for idx, (dev, (host, port, magic)) in enumerate(zip(instance.disks,
+      for idx, (dev, (host, port, magic)) in enumerate(zip(inst_disks,
                                                            disk_info)):
         # Decide whether to use IPv6
         ipv6 = netutils.IP6Address.IsValid(host)
@@ -1496,8 +1498,9 @@ def RemoteImport(lu, feedback_fn, instance, pnode, source_x509_ca,
                           len(instance.disks), pnode.primary_ip)
 
     ieloop = ImportExportLoop(lu)
+    inst_disks = lu.cfg.GetInstanceDisks(instance)
     try:
-      for idx, dev in enumerate(instance.disks):
+      for idx, dev in enumerate(inst_disks):
         magic = _GetInstDiskMagic(magic_base, instance.name, idx)
 
         # Import daemon options
diff --git a/lib/objects.py b/lib/objects.py
index 49d6e9c..921816c 100644
--- a/lib/objects.py
+++ b/lib/objects.py
@@ -448,10 +448,9 @@ class ConfigData(ConfigObject):
     @return: boolean indicating if a disk of the given type was found or not
 
     """
-    for instance in self.instances.values():
-      for disk in instance.disks:
-        if disk.IsBasedOnDiskType(dev_type):
-          return True
+    for disk in self.disks.values():
+      if disk.IsBasedOnDiskType(dev_type):
+        return True
     return False
 
   def UpgradeConfig(self):
@@ -1166,8 +1165,8 @@ class Instance(TaggableObject):
 
     @type idx: int
     @param idx: the disk index
-    @rtype: L{Disk}
-    @return: the corresponding disk
+    @rtype: string
+    @return: the corresponding disk's uuid
     @raise errors.OpPrereqError: when the given index is not valid
 
     """
@@ -1194,7 +1193,7 @@ class Instance(TaggableObject):
     if _with_private:
       bo["osparams_private"] = self.osparams_private.Unprivate()
 
-    for attr in "nics", "disks", "disks_info":
+    for attr in "nics", "disks_info":
       alist = bo.get(attr, None)
       if alist:
         nlist = outils.ContainerToDicts(alist)
@@ -1217,7 +1216,6 @@ class Instance(TaggableObject):
       del val["admin_up"]
     obj = super(Instance, cls).FromDict(val)
     obj.nics = outils.ContainerFromDicts(obj.nics, list, NIC)
-    obj.disks = outils.ContainerFromDicts(obj.disks, list, Disk)
     obj.disks_info = outils.ContainerFromDicts(obj.disks_info, list, Disk)
     return obj
 
@@ -1227,8 +1225,8 @@ class Instance(TaggableObject):
     """
     for nic in self.nics:
       nic.UpgradeConfig()
-    for disk in self.disks:
-      disk.UpgradeConfig()
+    if self.disks is None:
+      self.disks = []
     if self.hvparams:
       for key in constants.HVC_GLOBALS:
         try:
diff --git a/lib/rpc/node.py b/lib/rpc/node.py
index 30431f0..ac13a4c 100644
--- a/lib/rpc/node.py
+++ b/lib/rpc/node.py
@@ -886,7 +886,8 @@ class RpcRunner(_RpcClientBase,
     idict["osparams"] = cluster.SimpleFillOS(instance.os, instance.osparams)
     if osp is not None:
       idict["osparams"].update(osp)
-    idict["disks_info"] = self._DisksDictDP(node, (instance.disks, instance))
+    disks = self._cfg.GetInstanceDisks(instance)
+    idict["disks_info"] = self._DisksDictDP(node, (disks, instance))
     for nic in idict["nics"]:
       nic["nicparams"] = objects.FillDict(
         cluster.nicparams[constants.PP_DEFAULT],
diff --git a/src/Ganeti/Config.hs b/src/Ganeti/Config.hs
index eb55b44..089b736 100644
--- a/src/Ganeti/Config.hs
+++ b/src/Ganeti/Config.hs
@@ -348,9 +348,8 @@ getInstAllNodes cfg name = do
 
 -- | Get disks for a given instance object.
 getInstDisks :: ConfigData -> Instance -> ErrorResult [Disk]
-getInstDisks _cfg =
-  -- mapM (getDisk cfg) . instDisks
-  return . instDisks
+getInstDisks cfg =
+  mapM (getDisk cfg) . instDisks
 
 -- | Get disks for a given instance.
 -- The instance is specified by name or uuid.
diff --git a/src/Ganeti/Objects.hs b/src/Ganeti/Objects.hs
index e5f742c..c03279b 100644
--- a/src/Ganeti/Objects.hs
+++ b/src/Ganeti/Objects.hs
@@ -461,7 +461,7 @@ $(buildObject "Instance" "inst" $
   , simpleField "osparams_private" [t| OsParamsPrivate    |]
   , simpleField "admin_state"      [t| AdminState         |]
   , simpleField "nics"             [t| [PartialNic]       |]
-  , simpleField "disks"            [t| [Disk]             |]
+  , simpleField "disks"            [t| [String]           |]
   , simpleField "disk_template"    [t| DiskTemplate       |]
   , simpleField "disks_active"     [t| Bool               |]
   , optionalField $ simpleField "network_port" [t| Int  |]
-- 
1.9.1