This patch replaces 'instance.disks' with 'GetInstanceDisks' everywhere in the codebase. From now on, the 'GetInstanceDisks' function of the configuration must be used to retrieve the disks of an instance. Likewise, the 'AddInstanceDisk'/'RemoveInstanceDisk' functions must be used to add a disk object to, or remove one from, the configuration.
Signed-off-by: Ilias Tsitsimpis <[email protected]> --- lib/cmdlib/backup.py | 7 ++- lib/cmdlib/cluster.py | 6 +- lib/cmdlib/common.py | 10 ++-- lib/cmdlib/group.py | 6 +- lib/cmdlib/instance.py | 123 +++++++++++++++++++++++++-------------- lib/cmdlib/instance_migration.py | 21 ++++--- lib/cmdlib/instance_query.py | 2 +- lib/cmdlib/instance_storage.py | 80 +++++++++++++++---------- lib/cmdlib/instance_utils.py | 14 +++-- lib/config.py | 91 ++++++++++------------------- lib/masterd/iallocator.py | 6 +- lib/masterd/instance.py | 9 ++- lib/objects.py | 18 +++--- lib/rpc/node.py | 3 +- src/Ganeti/Config.hs | 5 +- src/Ganeti/Objects.hs | 2 +- 16 files changed, 223 insertions(+), 180 deletions(-) diff --git a/lib/cmdlib/backup.py b/lib/cmdlib/backup.py index d6ac7fe..a83d2f0 100644 --- a/lib/cmdlib/backup.py +++ b/lib/cmdlib/backup.py @@ -277,7 +277,7 @@ class LUBackupExport(LogicalUnit): # instance disk type verification # TODO: Implement export support for file-based disks - for disk in self.instance.disks: + for disk in self.cfg.GetInstanceDisks(self.instance.uuid): if disk.dev_type in constants.DTS_FILEBASED: raise errors.OpPrereqError("Export not supported for instances with" " file-based disks", errors.ECODE_INVAL) @@ -316,7 +316,7 @@ class LUBackupExport(LogicalUnit): self.secondary_nodes = \ self.cfg.GetInstanceSecondaryNodes(self.instance.uuid) - self.inst_disks = self.instance.disks + self.inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid) def _CleanupExports(self, feedback_fn): """Removes exports of current instance from all other nodes. @@ -402,7 +402,8 @@ class LUBackupExport(LogicalUnit): @return: Size of the disks in MiB """ - return sum([d.size for d in self.instance.disks]) + inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid) + return sum([d.size for d in inst_disks]) def ZeroFreeSpace(self, feedback_fn): """Zeroes the free space on a shutdown instance. 
diff --git a/lib/cmdlib/cluster.py b/lib/cmdlib/cluster.py index 9f0dd3a..a6be0f4 100644 --- a/lib/cmdlib/cluster.py +++ b/lib/cmdlib/cluster.py @@ -611,7 +611,7 @@ class LUClusterRepairDiskSizes(NoHooksLU): pnode = instance.primary_node if pnode not in per_node_disks: per_node_disks[pnode] = [] - for idx, disk in enumerate(instance.disks): + for idx, disk in enumerate(self.cfg.GetInstanceDisks(instance.uuid)): per_node_disks[pnode].append((instance, idx, disk)) assert not (frozenset(per_node_disks.keys()) - @@ -2394,7 +2394,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors): " that have exclusive storage set: %s", instance.disk_template, utils.CommaJoin(self.cfg.GetNodeNames(es_nodes))) - for (idx, disk) in enumerate(instance.disks): + for (idx, disk) in enumerate(self.cfg.GetInstanceDisks(instance.uuid)): self._ErrorIf(disk.spindles is None, constants.CV_EINSTANCEMISSINGCFGPARAMETER, instance.name, "number of spindles not configured for disk %s while" @@ -3072,7 +3072,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors): if instanceinfo[uuid].disk_template == diskless) disks = [(inst_uuid, disk) for inst_uuid in node_inst_uuids - for disk in instanceinfo[inst_uuid].disks] + for disk in self.cfg.GetInstanceDisks(inst_uuid)] if not disks: nodisk_instances.update(uuid for uuid in node_inst_uuids diff --git a/lib/cmdlib/common.py b/lib/cmdlib/common.py index 3353e7e..e437d7b 100644 --- a/lib/cmdlib/common.py +++ b/lib/cmdlib/common.py @@ -616,10 +616,11 @@ def ComputeIPolicyInstanceViolation(ipolicy, instance, cfg, cpu_count = be_full[constants.BE_VCPUS] inst_nodes = cfg.GetInstanceNodes(instance.uuid) es_flags = rpc.GetExclusiveStorageForNodes(cfg, inst_nodes) + disks = cfg.GetInstanceDisks(instance.uuid) if any(es_flags.values()): # With exclusive storage use the actual spindles try: - spindle_use = sum([disk.spindles for disk in instance.disks]) + spindle_use = sum([disk.spindles for disk in disks]) except TypeError: ret.append("Number of spindles 
not configured for disks of instance %s" " while exclusive storage is enabled, try running gnt-cluster" @@ -628,8 +629,8 @@ def ComputeIPolicyInstanceViolation(ipolicy, instance, cfg, spindle_use = None else: spindle_use = be_full[constants.BE_SPINDLE_USE] - disk_count = len(instance.disks) - disk_sizes = [disk.size for disk in instance.disks] + disk_count = len(disks) + disk_sizes = [disk.size for disk in disks] nic_count = len(instance.nics) disk_template = instance.disk_template @@ -1115,8 +1116,9 @@ def CheckIAllocatorOrNode(lu, iallocator_slot, node_slot): def FindFaultyInstanceDisks(cfg, rpc_runner, instance, node_uuid, prereq): faulty = [] + disks = cfg.GetInstanceDisks(instance.uuid) result = rpc_runner.call_blockdev_getmirrorstatus( - node_uuid, (instance.disks, instance)) + node_uuid, (disks, instance)) result.Raise("Failed to get disk status from node %s" % cfg.GetNodeName(node_uuid), prereq=prereq, ecode=errors.ECODE_ENVIRON) diff --git a/lib/cmdlib/group.py b/lib/cmdlib/group.py index eff1407..9dc990c 100644 --- a/lib/cmdlib/group.py +++ b/lib/cmdlib/group.py @@ -915,7 +915,8 @@ class LUGroupVerifyDisks(NoHooksLU): node_to_inst.setdefault(node_uuid, []).append(inst) for (node_uuid, insts) in node_to_inst.items(): - node_disks = [(inst.disks, inst) for inst in insts] + node_disks = [(self.cfg.GetInstanceDisks(inst.uuid), inst) + for inst in insts] node_res = self.rpc.call_drbd_needs_activation(node_uuid, node_disks) msg = node_res.fail_msg if msg: @@ -926,7 +927,8 @@ class LUGroupVerifyDisks(NoHooksLU): faulty_disk_uuids = set(node_res.payload) for inst in self.instances.values(): - inst_disk_uuids = set([disk.uuid for disk in inst.disks]) + disks = self.cfg.GetInstanceDisks(inst.uuid) + inst_disk_uuids = set([disk.uuid for disk in disks]) if inst_disk_uuids.intersection(faulty_disk_uuids): offline_disk_instance_names.add(inst.name) diff --git a/lib/cmdlib/instance.py b/lib/cmdlib/instance.py index 95b40e3..979f450 100644 --- a/lib/cmdlib/instance.py 
+++ b/lib/cmdlib/instance.py @@ -1365,6 +1365,8 @@ class LUInstanceCreate(LogicalUnit): if disk_abort: RemoveDisks(self, instance) + for disk_uuid in instance.disks: + self.cfg.RemoveInstanceDisk(instance.uuid, disk_uuid) self.cfg.RemoveInstance(instance.uuid) # Make sure the instance lock gets removed self.remove_locks[locking.LEVEL_INSTANCE] = instance.name @@ -1411,7 +1413,7 @@ class LUInstanceCreate(LogicalUnit): uuid=instance_uuid, os=os_type, primary_node=self.pnode.uuid, - nics=self.nics, disks=disks, + nics=self.nics, disks=[], disk_template=self.op.disk_template, disks_active=False, admin_state=constants.ADMINST_DOWN, @@ -1442,16 +1444,22 @@ class LUInstanceCreate(LogicalUnit): else: feedback_fn("* creating instance disks...") try: - CreateDisks(self, iobj, instance_disks=iobj.disks) + CreateDisks(self, iobj, instance_disks=disks) except errors.OpExecError: self.LogWarning("Device creation failed") self.cfg.ReleaseDRBDMinors(self.op.instance_name) raise feedback_fn("adding instance %s to cluster config" % self.op.instance_name) - self.cfg.AddInstance(iobj, self.proc.GetECId()) + feedback_fn("adding disks to cluster config") + for disk in disks: + self.cfg.AddInstanceDisk(iobj.uuid, disk) + + # re-read the instance from the configuration + iobj = self.cfg.GetInstanceInfo(iobj.uuid) + # Declare that we don't want to remove the instance lock anymore, as we've # added the instance to the config del self.remove_locks[locking.LEVEL_INSTANCE] @@ -1498,6 +1506,7 @@ class LUInstanceCreate(LogicalUnit): ReleaseLocks(self, locking.LEVEL_NODE_RES) if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks: + disks = self.cfg.GetInstanceDisks(iobj.uuid) if self.op.mode == constants.INSTANCE_CREATE: os_image = objects.GetOSImage(self.op.osparams) @@ -1507,8 +1516,8 @@ class LUInstanceCreate(LogicalUnit): if pause_sync: feedback_fn("* pausing disk sync to install instance OS") result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid, - (iobj.disks, 
- iobj), True) + (disks, iobj), + True) for idx, success in enumerate(result.payload): if not success: logging.warn("pause-sync of instance %s for disk %d failed", @@ -1524,8 +1533,8 @@ class LUInstanceCreate(LogicalUnit): if pause_sync: feedback_fn("* resuming disk sync") result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid, - (iobj.disks, - iobj), False) + (disks, iobj), + False) for idx, success in enumerate(result.payload): if not success: logging.warn("resume-sync of instance %s for disk %d failed", @@ -1547,10 +1556,10 @@ class LUInstanceCreate(LogicalUnit): if iobj.os: dst_io = constants.IEIO_SCRIPT - dst_ioargs = ((iobj.disks[idx], iobj), idx) + dst_ioargs = ((disks[idx], iobj), idx) else: dst_io = constants.IEIO_RAW_DISK - dst_ioargs = (iobj.disks[idx], iobj) + dst_ioargs = (disks[idx], iobj) # FIXME: pass debug option from opcode to backend dt = masterd.instance.DiskTransfer("disk/%s" % idx, @@ -1717,8 +1726,8 @@ class LUInstanceRename(LogicalUnit): if (self.instance.disk_template in (constants.DT_FILE, constants.DT_SHARED_FILE) and self.op.new_name != self.instance.name): - old_file_storage_dir = os.path.dirname( - self.instance.disks[0].logical_id[1]) + disks = self.cfg.GetInstanceDisks(self.instance.uuid) + old_file_storage_dir = os.path.dirname(disks[0].logical_id[1]) rename_file_storage = True self.cfg.RenameInstance(self.instance.uuid, self.op.new_name) @@ -1729,10 +1738,10 @@ class LUInstanceRename(LogicalUnit): # re-read the instance from the configuration after rename renamed_inst = self.cfg.GetInstanceInfo(self.instance.uuid) + disks = self.cfg.GetInstanceDisks(renamed_inst.uuid) if rename_file_storage: - new_file_storage_dir = os.path.dirname( - renamed_inst.disks[0].logical_id[1]) + new_file_storage_dir = os.path.dirname(disks[0].logical_id[1]) result = self.rpc.call_file_storage_dir_rename(renamed_inst.primary_node, old_file_storage_dir, new_file_storage_dir) @@ -1746,7 +1755,7 @@ class LUInstanceRename(LogicalUnit): # update 
info on disks info = GetInstanceInfoText(renamed_inst) - for (idx, disk) in enumerate(renamed_inst.disks): + for (idx, disk) in enumerate(disks): for node_uuid in self.cfg.GetInstanceNodes(renamed_inst.uuid): result = self.rpc.call_blockdev_setinfo(node_uuid, (disk, renamed_inst), info) @@ -1820,7 +1829,7 @@ class LUInstanceRemove(LogicalUnit): "Cannot retrieve locked instance %s" % self.op.instance_name self.secondary_nodes = \ self.cfg.GetInstanceSecondaryNodes(self.instance.uuid) - self.inst_disks = self.instance.disks + self.inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid) def Exec(self, feedback_fn): """Remove the instance. @@ -1926,7 +1935,8 @@ class LUInstanceMove(LogicalUnit): cluster = self.cfg.GetClusterInfo() bep = cluster.FillBE(self.instance) - for idx, dsk in enumerate(self.instance.disks): + disks = self.cfg.GetInstanceDisks(self.instance.uuid) + for idx, dsk in enumerate(disks): if dsk.dev_type not in (constants.DT_PLAIN, constants.DT_FILE, constants.DT_SHARED_FILE, constants.DT_GLUSTER): raise errors.OpPrereqError("Instance disk %d has a complex layout," @@ -1993,7 +2003,8 @@ class LUInstanceMove(LogicalUnit): errs = [] transfers = [] # activate, get path, create transfer jobs - for idx, disk in enumerate(self.instance.disks): + disks = self.cfg.GetInstanceDisks(self.instance.uuid) + for idx, disk in enumerate(disks): # FIXME: pass debug option from opcode to backend dt = masterd.instance.DiskTransfer("disk/%s" % idx, constants.IEIO_RAW_DISK, @@ -2002,6 +2013,7 @@ class LUInstanceMove(LogicalUnit): (disk, self.instance), None) transfers.append(dt) + self.cfg.Update(disk, feedback_fn) import_result = \ masterd.instance.TransferInstanceData(self, feedback_fn, @@ -2418,16 +2430,6 @@ def _ApplyContainerMods(kind, container, chgdesc, mods, chgdesc.extend(changes) -def _UpdateIvNames(base_index, disks): - """Updates the C{iv_name} attribute of disks. 
- - @type disks: list of L{objects.Disk} - - """ - for (idx, disk) in enumerate(disks): - disk.iv_name = "disk/%s" % (base_index + idx, ) - - class LUInstanceSetParams(LogicalUnit): """Modifies an instances's parameters. @@ -2858,7 +2860,7 @@ class LUInstanceSetParams(LogicalUnit): assert self.instance.disk_template == constants.DT_PLAIN disks = [{constants.IDISK_SIZE: d.size, constants.IDISK_VG: d.logical_id[0]} - for d in self.instance.disks] + for d in self.cfg.GetInstanceDisks(self.instance.uuid)] required = ComputeDiskSizePerVG(self.op.disk_template, disks) CheckNodesFreeDiskPerVG(self, [self.op.remote_node_uuid], required) @@ -2955,7 +2957,8 @@ class LUInstanceSetParams(LogicalUnit): disk.name = params.get(constants.IDISK_NAME, None) # Verify disk changes (operating on a copy) - disks = copy.deepcopy(self.instance.disks) + inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid) + disks = copy.deepcopy(inst_disks) _ApplyContainerMods("disk", disks, None, self.diskmod, None, _PrepareDiskMod, None) utils.ValidateDeviceNames("disk", disks) @@ -2963,7 +2966,7 @@ class LUInstanceSetParams(LogicalUnit): raise errors.OpPrereqError("Instance has too many disks (%d), cannot add" " more" % constants.MAX_DISKS, errors.ECODE_STATE) - disk_sizes = [disk.size for disk in self.instance.disks] + disk_sizes = [disk.size for disk in inst_disks] disk_sizes.extend(params["size"] for (op, idx, params, private) in self.diskmod if op == constants.DDM_ADD) ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes) @@ -3389,11 +3392,12 @@ class LUInstanceSetParams(LogicalUnit): assert self.instance.disk_template == constants.DT_PLAIN + old_disks = self.cfg.GetInstanceDisks(self.instance.uuid) # create a fake disk info for _GenerateDiskTemplate disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode, constants.IDISK_VG: d.logical_id[0], constants.IDISK_NAME: d.name} - for d in self.instance.disks] + for d in old_disks] new_disks = GenerateDiskTemplate(self, 
self.op.disk_template, self.instance.uuid, pnode_uuid, [snode_uuid], disk_info, None, None, 0, @@ -3415,7 +3419,7 @@ class LUInstanceSetParams(LogicalUnit): # old ones feedback_fn("Renaming original volumes...") rename_list = [(o, n.children[0].logical_id) - for (o, n) in zip(self.instance.disks, new_disks)] + for (o, n) in zip(old_disks, new_disks)] result = self.rpc.call_blockdev_rename(pnode_uuid, rename_list) result.Raise("Failed to rename original LVs") @@ -3432,16 +3436,27 @@ class LUInstanceSetParams(LogicalUnit): feedback_fn("Initializing of DRBD devices failed;" " renaming back original volumes...") rename_back_list = [(n.children[0], o.logical_id) - for (n, o) in zip(new_disks, self.instance.disks)] + for (n, o) in zip(new_disks, old_disks)] result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list) result.Raise("Failed to rename LVs back after error %s" % str(e)) raise - # at this point, the instance has been modified + # Remove the old disks from the instance + for old_disk in old_disks: + self.cfg.RemoveInstanceDisk(self.instance.uuid, old_disk.uuid) + + # Update instance structure + self.instance = self.cfg.GetInstanceInfo(self.instance.uuid) self.instance.disk_template = constants.DT_DRBD8 - self.instance.disks = new_disks self.cfg.Update(self.instance, feedback_fn) + # Attach the new disks to the instance + for (idx, new_disk) in enumerate(new_disks): + self.cfg.AddInstanceDisk(self.instance.uuid, new_disk, idx=idx) + + # re-read the instance from the configuration + self.instance = self.cfg.GetInstanceInfo(self.instance.uuid) + # Release node locks while waiting for sync ReleaseLocks(self, locking.LEVEL_NODE) @@ -3466,8 +3481,9 @@ class LUInstanceSetParams(LogicalUnit): snode_uuid = secondary_nodes[0] feedback_fn("Converting template to plain") - old_disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg) - new_disks = [d.children[0] for d in self.instance.disks] + disks = self.cfg.GetInstanceDisks(self.instance.uuid) + 
old_disks = AnnotateDiskParams(self.instance, disks, self.cfg) + new_disks = [d.children[0] for d in disks] # copy over size, mode and name for parent, child in zip(old_disks, new_disks): @@ -3481,12 +3497,22 @@ class LUInstanceSetParams(LogicalUnit): tcp_port = disk.logical_id[2] self.cfg.AddTcpUdpPort(tcp_port) - # update instance structure - self.instance.disks = new_disks + # Remove the old disks from the instance + for old_disk in old_disks: + self.cfg.RemoveInstanceDisk(self.instance.uuid, old_disk.uuid) + + # Update instance structure + self.instance = self.cfg.GetInstanceInfo(self.instance.uuid) self.instance.disk_template = constants.DT_PLAIN - _UpdateIvNames(0, self.instance.disks) self.cfg.Update(self.instance, feedback_fn) + # Attach the new disks to the instance + for (idx, new_disk) in enumerate(new_disks): + self.cfg.AddInstanceDisk(self.instance.uuid, new_disk, idx=idx) + + # re-read the instance from the configuration + self.instance = self.cfg.GetInstanceInfo(self.instance.uuid) + # Release locks in case removing disks takes a while ReleaseLocks(self, locking.LEVEL_NODE) @@ -3528,8 +3554,9 @@ class LUInstanceSetParams(LogicalUnit): """ # add a new disk + instance_disks = self.cfg.GetInstanceDisks(self.instance.uuid) if self.instance.disk_template in constants.DTS_FILEBASED: - (file_driver, file_path) = self.instance.disks[0].logical_id + (file_driver, file_path) = instance_disks[0].logical_id file_path = os.path.dirname(file_path) else: file_driver = file_path = None @@ -3542,6 +3569,10 @@ class LUInstanceSetParams(LogicalUnit): file_driver, idx, self.Log, self.diskparams)[0] new_disks = CreateDisks(self, self.instance, disks=[disk]) + self.cfg.AddInstanceDisk(self.instance.uuid, disk, idx) + + # re-read the instance from the configuration + self.instance = self.cfg.GetInstanceInfo(self.instance.uuid) if self.cluster.prealloc_wipe_disks: # Wipe new disk @@ -3635,6 +3666,12 @@ class LUInstanceSetParams(LogicalUnit): if root.dev_type in 
constants.DTS_DRBD: self.cfg.AddTcpUdpPort(root.logical_id[2]) + # Remove disk from config + self.cfg.RemoveInstanceDisk(self.instance.uuid, root.uuid) + + # re-read the instance from the configuration + self.instance = self.cfg.GetInstanceInfo(self.instance.uuid) + return hotmsg def _CreateNewNic(self, idx, params, private): @@ -3735,10 +3772,10 @@ class LUInstanceSetParams(LogicalUnit): result.append(("runtime_memory", self.op.runtime_mem)) # Apply disk changes - _ApplyContainerMods("disk", self.instance.disks, result, self.diskmod, + inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid) + _ApplyContainerMods("disk", inst_disks, result, self.diskmod, self._CreateNewDisk, self._ModifyDisk, self._RemoveDisk, post_add_fn=self._PostAddDisk) - _UpdateIvNames(0, self.instance.disks) if self.op.disk_template: if __debug__: diff --git a/lib/cmdlib/instance_migration.py b/lib/cmdlib/instance_migration.py index b1a65f8..31a175c 100644 --- a/lib/cmdlib/instance_migration.py +++ b/lib/cmdlib/instance_migration.py @@ -484,11 +484,11 @@ class TLMigrateInstance(Tasklet): """ self.feedback_fn("* wait until resync is done") all_done = False + disks = self.cfg.GetInstanceDisks(self.instance.uuid) while not all_done: all_done = True result = self.rpc.call_drbd_wait_sync(self.all_node_uuids, - (self.instance.disks, - self.instance)) + (disks, self.instance)) min_percent = 100 for node_uuid, nres in result.items(): nres.Raise("Cannot resync disks on node %s" % @@ -509,8 +509,9 @@ class TLMigrateInstance(Tasklet): self.feedback_fn("* switching node %s to secondary mode" % self.cfg.GetNodeName(node_uuid)) + disks = self.cfg.GetInstanceDisks(self.instance.uuid) result = self.rpc.call_blockdev_close(node_uuid, self.instance.name, - (self.instance.disks, self.instance)) + (disks, self.instance)) result.Raise("Cannot change disk to secondary on node %s" % self.cfg.GetNodeName(node_uuid)) @@ -519,8 +520,9 @@ class TLMigrateInstance(Tasklet): """ self.feedback_fn("* changing into 
standalone mode") + disks = self.cfg.GetInstanceDisks(self.instance.uuid) result = self.rpc.call_drbd_disconnect_net( - self.all_node_uuids, (self.instance.disks, self.instance)) + self.all_node_uuids, (disks, self.instance)) for node_uuid, nres in result.items(): nres.Raise("Cannot disconnect disks node %s" % self.cfg.GetNodeName(node_uuid)) @@ -534,8 +536,9 @@ class TLMigrateInstance(Tasklet): else: msg = "single-master" self.feedback_fn("* changing disks into %s mode" % msg) + disks = self.cfg.GetInstanceDisks(self.instance.uuid) result = self.rpc.call_drbd_attach_net(self.all_node_uuids, - (self.instance.disks, self.instance), + (disks, self.instance), self.instance.name, multimaster) for node_uuid, nres in result.items(): nres.Raise("Cannot change disks config on node %s" % @@ -681,7 +684,7 @@ class TLMigrateInstance(Tasklet): (src_version, dst_version)) self.feedback_fn("* checking disk consistency between source and target") - for (idx, dev) in enumerate(self.instance.disks): + for (idx, dev) in enumerate(self.cfg.GetInstanceDisks(self.instance.uuid)): if not CheckDiskConsistency(self.lu, self.instance, dev, self.target_node_uuid, False): @@ -816,7 +819,8 @@ class TLMigrateInstance(Tasklet): # If the instance's disk template is `rbd' or `ext' and there was a # successful migration, unmap the device from the source node. 
if self.instance.disk_template in (constants.DT_RBD, constants.DT_EXT): - disks = ExpandCheckDisks(self.instance, self.instance.disks) + inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid) + disks = ExpandCheckDisks(inst_disks, inst_disks) self.feedback_fn("* unmapping instance's disks from %s" % self.cfg.GetNodeName(self.source_node_uuid)) for disk in disks: @@ -847,7 +851,8 @@ class TLMigrateInstance(Tasklet): if self.instance.disks_active: self.feedback_fn("* checking disk consistency between source and target") - for (idx, dev) in enumerate(self.instance.disks): + inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid) + for (idx, dev) in enumerate(inst_disks): # for drbd, these are drbd over lvm if not CheckDiskConsistency(self.lu, self.instance, dev, self.target_node_uuid, False): diff --git a/lib/cmdlib/instance_query.py b/lib/cmdlib/instance_query.py index 2be0163..6fd0e8c 100644 --- a/lib/cmdlib/instance_query.py +++ b/lib/cmdlib/instance_query.py @@ -247,7 +247,7 @@ class LUInstanceQueryData(NoHooksLU): disks = map(compat.partial(self._ComputeDiskStatus, instance, node_uuid2name_fn), - instance.disks) + self.cfg.GetInstanceDisks(instance.uuid)) secondary_nodes = self.cfg.GetInstanceSecondaryNodes(instance.uuid) snodes_group_uuids = [nodes[snode_uuid].group diff --git a/lib/cmdlib/instance_storage.py b/lib/cmdlib/instance_storage.py index 3cf9447..43442ea 100644 --- a/lib/cmdlib/instance_storage.py +++ b/lib/cmdlib/instance_storage.py @@ -233,7 +233,7 @@ def CreateDisks(lu, instance, instance_disks=None, """ info = GetInstanceInfoText(instance) if instance_disks is None: - instance_disks = instance.disks + instance_disks = lu.cfg.GetInstanceDisks(instance.uuid) if target_node_uuid is None: pnode_uuid = instance.primary_node # We cannot use config's 'GetInstanceNodes' here as 'CreateDisks' @@ -613,7 +613,7 @@ class LUInstanceRecreateDisks(LogicalUnit): constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode, constants.IDISK_SPINDLES: 
d.spindles, - } for d in self.instance.disks] + } for d in self.cfg.GetInstanceDisks(self.instance.uuid)] req = iallocator.IAReqInstanceAlloc(name=self.op.instance_name, disk_template=disk_template, tags=list(self.instance.GetTags()), @@ -826,7 +826,8 @@ class LUInstanceRecreateDisks(LogicalUnit): to_skip = [] mods = [] # keeps track of needed changes - for idx, disk in enumerate(self.instance.disks): + inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid) + for idx, disk in enumerate(inst_disks): try: changes = self.disks[idx] except KeyError: @@ -854,7 +855,7 @@ class LUInstanceRecreateDisks(LogicalUnit): # now that we have passed all asserts above, we can apply the mods # in a single run (to avoid partial changes) for idx, new_id, changes in mods: - disk = self.instance.disks[idx] + disk = inst_disks[idx] if new_id is not None: assert disk.dev_type == constants.DT_DRBD8 disk.logical_id = new_id @@ -862,6 +863,7 @@ class LUInstanceRecreateDisks(LogicalUnit): disk.Update(size=changes.get(constants.IDISK_SIZE, None), mode=changes.get(constants.IDISK_MODE, None), spindles=changes.get(constants.IDISK_SPINDLES, None)) + self.cfg.Update(disk, feedback_fn) # change primary node, if needed if self.op.node_uuids: @@ -879,9 +881,10 @@ class LUInstanceRecreateDisks(LogicalUnit): new_disks = CreateDisks(self, self.instance, to_skip=to_skip) # TODO: Release node locks before wiping, or explain why it's not possible + inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid) if self.cfg.GetClusterInfo().prealloc_wipe_disks: wipedisks = [(idx, disk, 0) - for (idx, disk) in enumerate(self.instance.disks) + for (idx, disk) in enumerate(inst_disks) if idx not in to_skip] WipeOrCleanupDisks(self, self.instance, disks=wipedisks, cleanup=new_disks) @@ -1038,8 +1041,9 @@ def WipeDisks(lu, instance, disks=None): node_name = lu.cfg.GetNodeName(node_uuid) if disks is None: + inst_disks = lu.cfg.GetInstanceDisks(instance.uuid) disks = [(idx, disk, 0) - for (idx, disk) in 
enumerate(instance.disks)] + for (idx, disk) in enumerate(inst_disks)] logging.info("Pausing synchronization of disks of instance '%s'", instance.name) @@ -1132,10 +1136,11 @@ def ImageDisks(lu, instance, image, disks=None): node_uuid = instance.primary_node node_name = lu.cfg.GetNodeName(node_uuid) + inst_disks = lu.cfg.GetInstanceDisks(instance.uuid) if disks is None: - disks = [(0, instance.disks[0])] + disks = [(0, inst_disks[0])] else: - disks = map(lambda idx: (idx, instance.disks[idx]), disks) + disks = map(lambda idx: (idx, inst_disks[idx]), disks) logging.info("Pausing synchronization of disks of instance '%s'", instance.name) @@ -1200,7 +1205,7 @@ def WipeOrCleanupDisks(lu, instance, disks=None, cleanup=None): raise -def ExpandCheckDisks(instance, disks): +def ExpandCheckDisks(instance_disks, disks): """Return the instance disks selected by the disks list @type disks: list of L{objects.Disk} or None @@ -1210,12 +1215,14 @@ def ExpandCheckDisks(instance, disks): """ if disks is None: - return instance.disks + return instance_disks else: - if not set(disks).issubset(instance.disks): + inst_disks_uuids = [d.uuid for d in instance_disks] + disks_uuids = [d.uuid for d in disks] + if not set(disks_uuids).issubset(inst_disks_uuids): raise errors.ProgrammerError("Can only act on disks belonging to the" - " target instance: expected a subset of %r," - " got %r" % (instance.disks, disks)) + " target instance: expected a subset of %s," + " got %s" % (inst_disks_uuids, disks_uuids)) return disks @@ -1223,10 +1230,11 @@ def WaitForSync(lu, instance, disks=None, oneshot=False): """Sleep and poll for an instance's disk to sync. 
""" - if not instance.disks or disks is not None and not disks: + inst_disks = lu.cfg.GetInstanceDisks(instance.uuid) + if not inst_disks or disks is not None and not disks: return True - disks = ExpandCheckDisks(instance, disks) + disks = ExpandCheckDisks(inst_disks, disks) if not oneshot: lu.LogInfo("Waiting for instance %s to sync disks", instance.name) @@ -1311,7 +1319,8 @@ def ShutdownInstanceDisks(lu, instance, disks=None, ignore_primary=False): if disks is None: # only mark instance disks as inactive if all disks are affected lu.cfg.MarkInstanceDisksInactive(instance.uuid) - disks = ExpandCheckDisks(instance, disks) + inst_disks = lu.cfg.GetInstanceDisks(instance.uuid) + disks = ExpandCheckDisks(inst_disks, disks) for disk in disks: for node_uuid, top_disk in disk.ComputeNodeTree(instance.primary_node): @@ -1371,7 +1380,8 @@ def AssembleInstanceDisks(lu, instance, disks=None, ignore_secondaries=False, # only mark instance disks as active if all disks are affected instance = lu.cfg.MarkInstanceDisksActive(instance.uuid) - disks = ExpandCheckDisks(instance, disks) + inst_disks = lu.cfg.GetInstanceDisks(instance.uuid) + disks = ExpandCheckDisks(inst_disks, disks) # With the two passes mechanism we try to reduce the window of # opportunity for the race condition of switching DRBD to primary @@ -1516,7 +1526,7 @@ class LUInstanceGrowDisk(LogicalUnit): raise errors.OpPrereqError("Instance's disk layout does not support" " growing", errors.ECODE_INVAL) - self.disk = self.instance.FindDisk(self.op.disk) + self.disk = self.cfg.GetDiskInfo(self.instance.FindDisk(self.op.disk)) if self.op.absolute: self.target = self.op.amount @@ -1630,7 +1640,8 @@ class LUInstanceGrowDisk(LogicalUnit): assert wipe_disks ^ (old_disk_size is None) if wipe_disks: - assert self.instance.disks[self.op.disk] == self.disk + inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid) + assert inst_disks[self.op.disk] == self.disk # Wipe newly added disk space WipeDisks(self, self.instance, @@ 
-2019,7 +2030,7 @@ class TLReplaceDisks(Tasklet): """ node_uuids = self.cfg.GetInstanceNodes(instance.uuid) - for idx, dev in enumerate(instance.disks): + for idx, dev in enumerate(self.cfg.GetInstanceDisks(instance.uuid)): for node_uuid in node_uuids: self.lu.LogInfo("Checking disk/%d on %s", idx, self.cfg.GetNodeName(node_uuid)) @@ -2272,7 +2283,7 @@ class TLReplaceDisks(Tasklet): def _CheckDisksExistence(self, node_uuids): # Check disk existence - for idx, dev in enumerate(self.instance.disks): + for idx, dev in enumerate(self.cfg.GetInstanceDisks(self.instance.uuid)): if idx not in self.disks: continue @@ -2297,7 +2308,7 @@ class TLReplaceDisks(Tasklet): extra_hint)) def _CheckDisksConsistency(self, node_uuid, on_primary, ldisk): - for idx, dev in enumerate(self.instance.disks): + for idx, dev in enumerate(self.cfg.GetInstanceDisks(self.instance.uuid)): if idx not in self.disks: continue @@ -2320,7 +2331,8 @@ class TLReplaceDisks(Tasklet): """ iv_names = {} - disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg) + inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid) + disks = AnnotateDiskParams(self.instance, inst_disks, self.cfg) for idx, dev in enumerate(disks): if idx not in self.disks: continue @@ -2563,7 +2575,8 @@ class TLReplaceDisks(Tasklet): # Step: create new storage self.lu.LogStep(3, steps_total, "Allocate new storage") - disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg) + inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid) + disks = AnnotateDiskParams(self.instance, inst_disks, self.cfg) excl_stor = IsExclusiveStorageEnabledNodeUuid(self.lu.cfg, self.new_node_uuid) for idx, dev in enumerate(disks): @@ -2583,12 +2596,12 @@ class TLReplaceDisks(Tasklet): # error and the success paths self.lu.LogStep(4, steps_total, "Changing drbd configuration") minors = self.cfg.AllocateDRBDMinor([self.new_node_uuid - for _ in self.instance.disks], + for _ in inst_disks], self.instance.uuid) 
logging.debug("Allocated minors %r", minors) iv_names = {} - for idx, (dev, new_minor) in enumerate(zip(self.instance.disks, minors)): + for idx, (dev, new_minor) in enumerate(zip(inst_disks, minors)): self.lu.LogInfo("activating a new drbd on %s for disk/%d" % (self.cfg.GetNodeName(self.new_node_uuid), idx)) # create new devices on new_node; note that we create two IDs: @@ -2627,7 +2640,7 @@ class TLReplaceDisks(Tasklet): raise # We have new devices, shutdown the drbd on the old secondary - for idx, dev in enumerate(self.instance.disks): + for idx, dev in enumerate(inst_disks): self.lu.LogInfo("Shutting down drbd for disk/%d on old node", idx) msg = self.rpc.call_blockdev_shutdown(self.target_node_uuid, (dev, self.instance)).fail_msg @@ -2639,7 +2652,7 @@ class TLReplaceDisks(Tasklet): self.lu.LogInfo("Detaching primary drbds from the network (=> standalone)") result = self.rpc.call_drbd_disconnect_net( - [pnode], (self.instance.disks, self.instance))[pnode] + [pnode], (inst_disks, self.instance))[pnode] msg = result.fail_msg if msg: @@ -2653,6 +2666,7 @@ class TLReplaceDisks(Tasklet): self.lu.LogInfo("Updating instance configuration") for dev, _, new_logical_id in iv_names.itervalues(): dev.logical_id = new_logical_id + self.cfg.Update(dev, feedback_fn) self.cfg.Update(self.instance, feedback_fn) @@ -2662,9 +2676,10 @@ class TLReplaceDisks(Tasklet): # and now perform the drbd attach self.lu.LogInfo("Attaching primary drbds to new secondary" " (standalone => connected)") + inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid) result = self.rpc.call_drbd_attach_net([self.instance.primary_node, self.new_node_uuid], - (self.instance.disks, self.instance), + (inst_disks, self.instance), self.instance.name, False) for to_node, to_result in result.items(): @@ -2788,7 +2803,8 @@ class TemporaryDisk(): self._feedback_fn("Attempting to create temporary disk") self._undoing_info = CreateDisks(self._lu, self._instance, disks=[new_disk]) - self._instance.disks.insert(0, 
new_disk) + self._lu.cfg.AddInstanceDisk(self._instance.uuid, new_disk, idx=0) + self._instance = self._lu.cfg.GetInstanceInfo(self._instance.uuid) self._feedback_fn("Temporary disk created") @@ -2807,7 +2823,9 @@ class TemporaryDisk(): self._EnsureInstanceDiskState() _UndoCreateDisks(self._lu, self._undoing_info, self._instance) - self._instance.disks.pop(0) + self._lu.cfg.RemoveInstanceDisk(self._instance.uuid, + self._instance.disks[0]) + self._instance = self._lu.cfg.GetInstanceInfo(self._instance.uuid) self._feedback_fn("Temporary disk removed") except: diff --git a/lib/cmdlib/instance_utils.py b/lib/cmdlib/instance_utils.py index c12ff33..035793d 100644 --- a/lib/cmdlib/instance_utils.py +++ b/lib/cmdlib/instance_utils.py @@ -173,7 +173,7 @@ def BuildInstanceHookEnvByObject(lu, instance, secondary_nodes=None, # Override disks if disks is None: - disks = instance.disks + disks = lu.cfg.GetInstanceDisks(instance.uuid) args = { "name": instance.name, @@ -244,8 +244,11 @@ def RemoveInstance(lu, feedback_fn, instance, ignore_failures): raise errors.OpExecError("Can't remove instance's disks") feedback_fn("Warning: can't remove instance's disks") - logging.info("Removing instance %s out of cluster config", instance.name) + logging.info("Removing instance's disks") + for disk in instance.disks: + lu.cfg.RemoveInstanceDisk(instance.uuid, disk) + logging.info("Removing instance %s out of cluster config", instance.name) lu.cfg.RemoveInstance(instance.uuid) assert not lu.remove_locks.get(locking.LEVEL_INSTANCE), \ @@ -277,7 +280,8 @@ def RemoveDisks(lu, instance, target_node_uuid=None, ignore_failures=False): all_result = True ports_to_release = set() - anno_disks = AnnotateDiskParams(instance, instance.disks, lu.cfg) + inst_disks = lu.cfg.GetInstanceDisks(instance.uuid) + anno_disks = AnnotateDiskParams(instance, inst_disks, lu.cfg) for (idx, device) in enumerate(anno_disks): if target_node_uuid: edata = [(target_node_uuid, device)] @@ -303,8 +307,8 @@ def 
RemoveDisks(lu, instance, target_node_uuid=None, ignore_failures=False): CheckDiskTemplateEnabled(lu.cfg.GetClusterInfo(), instance.disk_template) if instance.disk_template in [constants.DT_FILE, constants.DT_SHARED_FILE]: - if len(instance.disks) > 0: - file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1]) + if len(inst_disks) > 0: + file_storage_dir = os.path.dirname(inst_disks[0].logical_id[1]) else: if instance.disk_template == constants.DT_SHARED_FILE: file_storage_dir = utils.PathJoin(lu.cfg.GetSharedFileStorageDir(), diff --git a/lib/config.py b/lib/config.py index d59e9a6..0dd8650 100644 --- a/lib/config.py +++ b/lib/config.py @@ -548,7 +548,7 @@ class ConfigWriter(object): if instance is None: raise errors.ConfigurationError("Unknown instance '%s'" % inst_uuid) - instance_disks = instance.disks + instance_disks = self._UnlockedGetInstanceDisks(inst_uuid) all_nodes = [] for disk in instance_disks: all_nodes.extend(disk.all_nodes) @@ -634,7 +634,7 @@ class ConfigWriter(object): ret = None node_uuid = instance.primary_node - devices = instance.disks + devices = self._UnlockedGetInstanceDisks(instance.uuid) _MapLVsByNode(lvmap, devices, node_uuid) return ret @@ -848,26 +848,6 @@ class ConfigWriter(object): lvnames.update(lv_list) return lvnames - def _AllDisks(self): - """Compute the list of all Disks (recursively, including children). - - """ - def DiskAndAllChildren(disk): - """Returns a list containing the given disk and all of his children. - - """ - disks = [disk] - if disk.children: - for child_disk in disk.children: - disks.extend(DiskAndAllChildren(child_disk)) - return disks - - disks = [] - for instance in self._ConfigData().instances.values(): - for disk in instance.disks: - disks.extend(DiskAndAllChildren(disk)) - return disks - def _AllNICs(self): """Compute the list of all NICs. 
@@ -950,35 +930,11 @@ class ConfigWriter(object): helper(child, result) result = [] - for instance in self._ConfigData().instances.values(): - for disk in instance.disks: - helper(disk, result) + for disk in self._ConfigData().disks.values(): + helper(disk, result) return result - def _CheckDiskIDs(self, disk, l_ids): - """Compute duplicate disk IDs - - @type disk: L{objects.Disk} - @param disk: the disk at which to start searching - @type l_ids: list - @param l_ids: list of current logical ids - @rtype: list - @return: a list of error messages - - """ - result = [] - if disk.logical_id is not None: - if disk.logical_id in l_ids: - result.append("duplicate logical id %s" % str(disk.logical_id)) - else: - l_ids.append(disk.logical_id) - - if disk.children: - for child in disk.children: - result.extend(self._CheckDiskIDs(child, l_ids)) - return result - def _UnlockedVerifyConfig(self): """Verify function. @@ -993,7 +949,6 @@ class ConfigWriter(object): ports = {} data = self._ConfigData() cluster = data.cluster - seen_lids = [] # global cluster checks if not cluster.enabled_hypervisors: @@ -1094,6 +1049,20 @@ class ConfigWriter(object): ) ) + # per-disk checks + instance_disk_uuids = [d for insts in data.instances.values() + for d in insts.disks] + for disk_uuid in data.disks: + disk = data.disks[disk_uuid] + result.extend(["disk %s error: %s" % (disk.uuid, msg) + for msg in disk.Verify()]) + if disk.uuid != disk_uuid: + result.append("disk '%s' is indexed by wrong UUID '%s'" % + (disk.name, disk_uuid)) + if disk.uuid not in instance_disk_uuids: + result.append("disk '%s' is not attached to any instance" % + disk.uuid) + # per-instance checks for instance_uuid in data.instances: instance = data.instances[instance_uuid] @@ -1130,8 +1099,15 @@ class ConfigWriter(object): _helper("instance %s" % instance.name, "beparams", cluster.FillBE(instance), constants.BES_PARAMETER_TYPES) + # check that disks exists + for disk_uuid in instance.disks: + if disk_uuid not in 
data.disks: + result.append("Instance '%s' has invalid disk '%s'" % + (instance.name, disk_uuid)) + + instance_disks = self._UnlockedGetInstanceDisks(instance.uuid) # gather the drbd ports for duplicate checks - for (idx, dsk) in enumerate(instance.disks): + for (idx, dsk) in enumerate(instance_disks): if dsk.dev_type in constants.DTS_DRBD: tcp_port = dsk.logical_id[2] if tcp_port not in ports: @@ -1144,13 +1120,7 @@ class ConfigWriter(object): ports[net_port] = [] ports[net_port].append((instance.name, "network port")) - # instance disk verify - for idx, disk in enumerate(instance.disks): - result.extend(["instance '%s' disk %d error: %s" % - (instance.name, idx, msg) for msg in disk.Verify()]) - result.extend(self._CheckDiskIDs(disk, seen_lids)) - - wrong_names = _CheckInstanceDiskIvNames(instance.disks) + wrong_names = _CheckInstanceDiskIvNames(instance_disks) if wrong_names: tmp = "; ".join(("name of disk %s should be '%s', but is '%s'" % (idx, exp_name, actual_name)) @@ -1367,7 +1337,7 @@ class ConfigWriter(object): duplicates = [] my_dict = dict((node_uuid, {}) for node_uuid in self._ConfigData().nodes) for instance in self._ConfigData().instances.itervalues(): - for disk in instance.disks: + for disk in self._UnlockedGetInstanceDisks(instance.uuid): duplicates.extend(_AppendUsedMinors(self._UnlockedGetNodeName, instance, disk, my_dict)) for (node_uuid, minor), inst_uuid in self._temporary_drbds.iteritems(): @@ -2010,7 +1980,8 @@ class ConfigWriter(object): inst = self._ConfigData().instances[inst_uuid] inst.name = new_name - for (_, disk) in enumerate(inst.disks): + instance_disks = self._UnlockedGetInstanceDisks(inst_uuid) + for (_, disk) in enumerate(instance_disks): if disk.dev_type in [constants.DT_FILE, constants.DT_SHARED_FILE]: # rename the file paths in logical and physical id file_storage_dir = os.path.dirname(os.path.dirname(disk.logical_id[1])) @@ -2780,7 +2751,7 @@ class ConfigWriter(object): self._ConfigData().nodes.values() + 
self._ConfigData().nodegroups.values() + self._ConfigData().networks.values() + - self._AllDisks() + + self._ConfigData().disks.values() + self._AllNICs() + [self._ConfigData().cluster]) diff --git a/lib/masterd/iallocator.py b/lib/masterd/iallocator.py index c9bc110..c977eb9 100644 --- a/lib/masterd/iallocator.py +++ b/lib/masterd/iallocator.py @@ -270,7 +270,8 @@ class IAReqRelocate(IARequestBase): raise errors.OpPrereqError("Instance has not exactly one secondary node", errors.ECODE_STATE) - disk_sizes = [{constants.IDISK_SIZE: disk.size} for disk in instance.disks] + inst_disks = cfg.GetInstanceDisks(instance.uuid) + disk_sizes = [{constants.IDISK_SIZE: disk.size} for disk in inst_disks] disk_space = gmi.ComputeDiskSize(instance.disk_template, disk_sizes) return { @@ -741,6 +742,7 @@ class IAllocator(object): if filled_params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED: nic_dict["bridge"] = filled_params[constants.NIC_LINK] nic_data.append(nic_dict) + inst_disks = cfg.GetInstanceDisks(iinfo.uuid) pir = { "tags": list(iinfo.GetTags()), "admin_state": iinfo.admin_state, @@ -755,7 +757,7 @@ class IAllocator(object): "disks": [{constants.IDISK_SIZE: dsk.size, constants.IDISK_MODE: dsk.mode, constants.IDISK_SPINDLES: dsk.spindles} - for dsk in iinfo.disks], + for dsk in inst_disks], "disk_template": iinfo.disk_template, "disks_active": iinfo.disks_active, "hypervisor": iinfo.hypervisor, diff --git a/lib/masterd/instance.py b/lib/masterd/instance.py index e9feb86..e2e326f 100644 --- a/lib/masterd/instance.py +++ b/lib/masterd/instance.py @@ -1166,7 +1166,8 @@ class ExportInstanceHelper: src_node = instance.primary_node src_node_name = self._lu.cfg.GetNodeName(src_node) - for idx, disk in enumerate(instance.disks): + inst_disks = self._lu.cfg.GetInstanceDisks(instance.uuid) + for idx, disk in enumerate(inst_disks): self._feedback_fn("Creating a snapshot of disk/%s on node %s" % (idx, src_node_name)) @@ -1294,6 +1295,7 @@ class ExportInstanceHelper: """ instance 
= self._instance + inst_disks = self._lu.cfg.GetInstanceDisks(instance.uuid) assert len(disk_info) == len(instance.disks) @@ -1301,7 +1303,7 @@ class ExportInstanceHelper: ieloop = ImportExportLoop(self._lu) try: - for idx, (dev, (host, port, magic)) in enumerate(zip(instance.disks, + for idx, (dev, (host, port, magic)) in enumerate(zip(inst_disks, disk_info)): # Decide whether to use IPv6 ipv6 = netutils.IP6Address.IsValid(host) @@ -1496,8 +1498,9 @@ def RemoteImport(lu, feedback_fn, instance, pnode, source_x509_ca, len(instance.disks), pnode.primary_ip) ieloop = ImportExportLoop(lu) + inst_disks = lu.cfg.GetInstanceDisks(instance.uuid) try: - for idx, dev in enumerate(instance.disks): + for idx, dev in enumerate(inst_disks): magic = _GetInstDiskMagic(magic_base, instance.name, idx) # Import daemon options diff --git a/lib/objects.py b/lib/objects.py index 0265bcd..3c71483 100644 --- a/lib/objects.py +++ b/lib/objects.py @@ -448,10 +448,9 @@ class ConfigData(ConfigObject): @return: boolean indicating if a disk of the given type was found or not """ - for instance in self.instances.values(): - for disk in instance.disks: - if disk.IsBasedOnDiskType(dev_type): - return True + for disk in self.disks.values(): + if disk.IsBasedOnDiskType(dev_type): + return True return False def UpgradeConfig(self): @@ -1166,8 +1165,8 @@ class Instance(TaggableObject): @type idx: int @param idx: the disk index - @rtype: L{Disk} - @return: the corresponding disk + @rtype: string + @return: the corresponding disk's uuid @raise errors.OpPrereqError: when the given index is not valid """ @@ -1194,7 +1193,7 @@ class Instance(TaggableObject): if _with_private: bo["osparams_private"] = self.osparams_private.Unprivate() - for attr in "nics", "disks", "disks_info": + for attr in "nics", "disks_info": alist = bo.get(attr, None) if alist: nlist = outils.ContainerToDicts(alist) @@ -1217,7 +1216,6 @@ class Instance(TaggableObject): del val["admin_up"] obj = super(Instance, cls).FromDict(val) 
obj.nics = outils.ContainerFromDicts(obj.nics, list, NIC) - obj.disks = outils.ContainerFromDicts(obj.disks, list, Disk) obj.disks_info = outils.ContainerFromDicts(obj.disks_info, list, Disk) return obj @@ -1227,8 +1225,8 @@ class Instance(TaggableObject): """ for nic in self.nics: nic.UpgradeConfig() - for disk in self.disks: - disk.UpgradeConfig() + if self.disks is None: + self.disks = [] if self.hvparams: for key in constants.HVC_GLOBALS: try: diff --git a/lib/rpc/node.py b/lib/rpc/node.py index 19fc018..a6a7e59 100644 --- a/lib/rpc/node.py +++ b/lib/rpc/node.py @@ -887,7 +887,8 @@ class RpcRunner(_RpcClientBase, idict["osparams"] = cluster.SimpleFillOS(instance.os, instance.osparams) if osp is not None: idict["osparams"].update(osp) - idict["disks_info"] = self._DisksDictDP(node, (instance.disks, instance)) + disks = self._cfg.GetInstanceDisks(instance.uuid) + idict["disks_info"] = self._DisksDictDP(node, (disks, instance)) for nic in idict["nics"]: nic["nicparams"] = objects.FillDict( cluster.nicparams[constants.PP_DEFAULT], diff --git a/src/Ganeti/Config.hs b/src/Ganeti/Config.hs index a88076c..f6243f7 100644 --- a/src/Ganeti/Config.hs +++ b/src/Ganeti/Config.hs @@ -349,9 +349,8 @@ getInstAllNodes cfg name = do -- | Get disks for a given instance. -- The instance is specified by name or uuid. getInstDisks :: ConfigData -> String -> ErrorResult [Disk] -getInstDisks cfg = - -- getInstance cfg iname >>= mapM (getDisk cfg) . instDisks - liftM instDisks . getInstance cfg +getInstDisks cfg iname = + getInstance cfg iname >>= mapM (getDisk cfg) . instDisks -- | Get disks for a given instance object. 
getInstDisksFromObj :: ConfigData -> Instance -> ErrorResult [Disk] diff --git a/src/Ganeti/Objects.hs b/src/Ganeti/Objects.hs index e5f742c..c03279b 100644 --- a/src/Ganeti/Objects.hs +++ b/src/Ganeti/Objects.hs @@ -461,7 +461,7 @@ $(buildObject "Instance" "inst" $ , simpleField "osparams_private" [t| OsParamsPrivate |] , simpleField "admin_state" [t| AdminState |] , simpleField "nics" [t| [PartialNic] |] - , simpleField "disks" [t| [Disk] |] + , simpleField "disks" [t| [String] |] , simpleField "disk_template" [t| DiskTemplate |] , simpleField "disks_active" [t| Bool |] , optionalField $ simpleField "network_port" [t| Int |] -- 1.9.1
