On Mon, Nov 17, 2014 at 11:29 AM, 'Aaron Karper' via ganeti-devel
<[email protected]> wrote:
> More and more parts of the code use these disk-type predicates to check
> whether a certain operation is appropriate. The refactor reflects that
> they are general helpers and no longer specific to instances.
>
> Signed-off-by: Aaron Karper <[email protected]>
> ---
>  lib/cmdlib/cluster/__init__.py    |  1 -
>  lib/cmdlib/cluster/verify.py      | 11 ++++----
>  lib/cmdlib/group.py               |  5 ++--
>  lib/cmdlib/instance_create.py     |  5 ++--
>  lib/cmdlib/instance_migration.py  |  9 +++----
>  lib/cmdlib/instance_set_params.py | 10 ++++----
>  lib/cmdlib/instance_storage.py    | 12 ++++-----
>  lib/cmdlib/instance_utils.py      | 54 
> ---------------------------------------
>  lib/cmdlib/node.py                |  3 +--
>  lib/utils/__init__.py             | 49 +++++++++++++++++++++++++++++++++++
>  10 files changed, 74 insertions(+), 85 deletions(-)
>
> diff --git a/lib/cmdlib/cluster/__init__.py b/lib/cmdlib/cluster/__init__.py
> index d429f53..28ddb96 100644
> --- a/lib/cmdlib/cluster/__init__.py
> +++ b/lib/cmdlib/cluster/__init__.py
> @@ -69,7 +69,6 @@ from ganeti.cmdlib.common import ShareAll, RunPostHook, \
>    AddInstanceCommunicationNetworkOp, ConnectInstanceCommunicationNetworkOp, \
>    CheckImageValidity, \
>    CheckDiskAccessModeConsistency, CreateNewClientCert, EnsureKvmdOnNodes
> -from ganeti.cmdlib.instance_utils import AnyDiskOfType, AllDiskOfType
>
>  import ganeti.masterd.instance
>
> diff --git a/lib/cmdlib/cluster/verify.py b/lib/cmdlib/cluster/verify.py
> index 33d29eb..5058815 100644
> --- a/lib/cmdlib/cluster/verify.py
> +++ b/lib/cmdlib/cluster/verify.py
> @@ -51,7 +51,6 @@ from ganeti.cmdlib.base import LogicalUnit, NoHooksLU, 
> ResultWithJobs
>  from ganeti.cmdlib.common import ShareAll, ComputeAncillaryFiles, \
>      CheckNodePVs, ComputeIPolicyInstanceViolation, AnnotateDiskParams, \
>      SupportsOob
> -from ganeti.cmdlib.instance_utils import AnyDiskOfType, AllDiskOfType
>
>
>  def _GetAllHypervisorParameters(cluster, instances):
> @@ -408,7 +407,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
>          # Important: access only the instances whose lock is owned
>          instance = self.cfg.GetInstanceInfoByName(inst_name)
>          disks = self.cfg.GetInstanceDisks(instance.uuid)
> -        if AnyDiskOfType(disks, constants.DTS_INT_MIRROR):
> +        if utils.AnyDiskOfType(disks, constants.DTS_INT_MIRROR):
>            nodes.update(self.cfg.GetInstanceSecondaryNodes(instance.uuid))
>
>        self.needed_locks[locking.LEVEL_NODE] = nodes
> @@ -458,7 +457,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
>
>      for inst in self.my_inst_info.values():
>        disks = self.cfg.GetInstanceDisks(inst.uuid)
> -      if AnyDiskOfType(disks, constants.DTS_INT_MIRROR):
> +      if utils.AnyDiskOfType(disks, constants.DTS_INT_MIRROR):
>          inst_nodes = self.cfg.GetInstanceNodes(inst.uuid)
>          for nuuid in inst_nodes:
>            if self.all_node_info[nuuid].group != self.group_uuid:
> @@ -822,7 +821,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
>      es_flags = rpc.GetExclusiveStorageForNodes(self.cfg, inst_nodes)
>      disks = self.cfg.GetInstanceDisks(instance.uuid)
>      if any(es_flags.values()):
> -      if not AllDiskOfType(disks, constants.DTS_EXCL_STORAGE):
> +      if not utils.AllDiskOfType(disks, constants.DTS_EXCL_STORAGE):
>          # Disk template not compatible with exclusive_storage: no instance
>          # node should have the flag set
>          es_nodes = [n
> @@ -842,7 +841,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
>                        " exclusive storage is enabled, try running"
>                        " gnt-cluster repair-disk-sizes", idx)
>
> -    if AnyDiskOfType(disks, constants.DTS_INT_MIRROR):
> +    if utils.AnyDiskOfType(disks, constants.DTS_INT_MIRROR):
>        instance_nodes = utils.NiceSort(inst_nodes)
>        instance_groups = {}
>
> @@ -2085,7 +2084,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
>        # If the instance is not fully redundant we cannot survive losing its
>        # primary node, so we are not N+1 compliant.
>        inst_disks = self.cfg.GetInstanceDisks(instance.uuid)
> -      if not AllDiskOfType(inst_disks, constants.DTS_MIRRORED):
> +      if not utils.AllDiskOfType(inst_disks, constants.DTS_MIRRORED):
>          i_non_redundant.append(instance)
>
>        if not cluster.FillBE(instance)[constants.BE_AUTO_BALANCE]:
> diff --git a/lib/cmdlib/group.py b/lib/cmdlib/group.py
> index 13ffb31..87cb6a7 100644
> --- a/lib/cmdlib/group.py
> +++ b/lib/cmdlib/group.py
> @@ -48,7 +48,6 @@ from ganeti.cmdlib.common import MergeAndVerifyHvState, \
>    CheckInstancesNodeGroups, LoadNodeEvacResult, MapInstanceLvsToNodes, \
>    CheckIpolicyVsDiskTemplates, CheckDiskAccessModeValidity, \
>    CheckDiskAccessModeConsistency, ConnectInstanceCommunicationNetworkOp
> -from ganeti.cmdlib.instance_utils import AnyDiskOfType
>
>  import ganeti.masterd.instance
>
> @@ -317,7 +316,7 @@ class LUGroupAssignNodes(NoHooksLU):
>
>      for inst in instance_data.values():
>        inst_disks = self.cfg.GetInstanceDisks(inst.uuid)
> -      if not AnyDiskOfType(inst_disks, constants.DTS_INT_MIRROR):
> +      if not utils.AnyDiskOfType(inst_disks, constants.DTS_INT_MIRROR):
>          continue
>
>        inst_nodes = self.cfg.GetInstanceNodes(inst.uuid)
> @@ -947,7 +946,7 @@ class LUGroupVerifyDisks(NoHooksLU):
>      for inst in self.instances.values():
>        disks = self.cfg.GetInstanceDisks(inst.uuid)
>        if not (inst.disks_active and
> -              AnyDiskOfType(disks, [constants.DT_DRBD8])):
> +              utils.AnyDiskOfType(disks, [constants.DT_DRBD8])):
>          continue
>
>        secondary_nodes = self.cfg.GetInstanceSecondaryNodes(inst.uuid)
> diff --git a/lib/cmdlib/instance_create.py b/lib/cmdlib/instance_create.py
> index a5201f3..e3d41e6 100644
> --- a/lib/cmdlib/instance_create.py
> +++ b/lib/cmdlib/instance_create.py
> @@ -74,8 +74,7 @@ from ganeti.cmdlib.instance_utils import \
>    CheckHostnameSane, CheckOpportunisticLocking, \
>    ComputeFullBeParams, ComputeNics, GetClusterDomainSecret, \
>    CheckInstanceExistence, CreateInstanceAllocRequest, BuildInstanceHookEnv, \
> -  NICListToTuple, CheckNicsBridgesExist, CheckCompressionTool, \
> -  AnyDiskOfType
> +  NICListToTuple, CheckNicsBridgesExist, CheckCompressionTool
>  import ganeti.masterd.instance
>
>
> @@ -1130,7 +1129,7 @@ class LUInstanceCreate(LogicalUnit):
>
>          if os_image is None and not self.op.no_install:
>            pause_sync = (not self.op.wait_for_sync and
> -                        AnyDiskOfType(disks, constants.DTS_INT_MIRROR))
> +                        utils.AnyDiskOfType(disks, constants.DTS_INT_MIRROR))
>            if pause_sync:
>              feedback_fn("* pausing disk sync to install instance OS")
>              result = 
> self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
> diff --git a/lib/cmdlib/instance_migration.py 
> b/lib/cmdlib/instance_migration.py
> index fdfedb2..012786c 100644
> --- a/lib/cmdlib/instance_migration.py
> +++ b/lib/cmdlib/instance_migration.py
> @@ -46,8 +46,7 @@ from ganeti.cmdlib.instance_storage import 
> CheckDiskConsistency, \
>    ExpandCheckDisks, ShutdownInstanceDisks, AssembleInstanceDisks
>  from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
>    CheckTargetNodeIPolicy, ReleaseLocks, CheckNodeNotDrained, \
> -  CopyLockList, CheckNodeFreeMemory, CheckInstanceBridgesExist, \
> -  AnyDiskOfType
> +  CopyLockList, CheckNodeFreeMemory, CheckInstanceBridgesExist
>
>  import ganeti.masterd.instance
>
> @@ -91,7 +90,7 @@ def _DeclareLocksForMigration(lu, level):
>      # Node locks are already declared here rather than at LEVEL_NODE as we 
> need
>      # the instance object anyway to declare the node allocation lock.
>      disks = lu.cfg.GetInstanceDisks(instance.uuid)
> -    if AnyDiskOfType(disks, constants.DTS_EXT_MIRROR):
> +    if utils.AnyDiskOfType(disks, constants.DTS_EXT_MIRROR):
>        if lu.op.target_node is None:
>          lu.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
>          lu.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
> @@ -169,7 +168,7 @@ class LUInstanceFailover(LogicalUnit):
>        }
>
>      disks = self.cfg.GetInstanceDisks(instance.uuid)
> -    if AnyDiskOfType(disks, constants.DTS_INT_MIRROR):
> +    if utils.AnyDiskOfType(disks, constants.DTS_INT_MIRROR):
>        secondary_nodes = self.cfg.GetInstanceSecondaryNodes(instance.uuid)
>        env["OLD_SECONDARY"] = self.cfg.GetNodeName(secondary_nodes[0])
>        env["NEW_SECONDARY"] = self.cfg.GetNodeName(source_node_uuid)
> @@ -242,7 +241,7 @@ class LUInstanceMigrate(LogicalUnit):
>        })
>
>      disks = self.cfg.GetInstanceDisks(instance.uuid)
> -    if AnyDiskOfType(disks, constants.DTS_INT_MIRROR):
> +    if utils.AnyDiskOfType(disks, constants.DTS_INT_MIRROR):
>        secondary_nodes = self.cfg.GetInstanceSecondaryNodes(instance.uuid)
>        env["OLD_SECONDARY"] = self.cfg.GetNodeName(secondary_nodes[0])
>        env["NEW_SECONDARY"] = self.cfg.GetNodeName(source_node_uuid)
> diff --git a/lib/cmdlib/instance_set_params.py 
> b/lib/cmdlib/instance_set_params.py
> index 0619a96..7b3ff9d 100644
> --- a/lib/cmdlib/instance_set_params.py
> +++ b/lib/cmdlib/instance_set_params.py
> @@ -66,7 +66,7 @@ from ganeti.cmdlib.instance_utils import 
> BuildInstanceHookEnvByObject, \
>    UpdateMetadata, CheckForConflictingIp, \
>    PrepareContainerMods, ComputeInstanceCommunicationNIC, \
>    ApplyContainerMods, ComputeIPolicyInstanceSpecViolation, \
> -  CheckNodesPhysicalCPUs, AllDiskOfType, AnyDiskOfType
> +  CheckNodesPhysicalCPUs
>  import ganeti.masterd.instance
>
>
> @@ -522,7 +522,7 @@ class LUInstanceSetParams(LogicalUnit):
>      pnode_uuid = self.instance.primary_node
>
>      inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
> -    if AnyDiskOfType(inst_disks, constants.DTS_NOT_CONVERTIBLE_FROM):
> +    if utils.AnyDiskOfType(inst_disks, constants.DTS_NOT_CONVERTIBLE_FROM):
>        raise errors.OpPrereqError("Conversion from the '%s' disk template is"
>                                   " not supported" % 
> self.instance.disk_template,
>                                   errors.ECODE_INVAL)
> @@ -533,7 +533,7 @@ class LUInstanceSetParams(LogicalUnit):
>                                   errors.ECODE_INVAL)
>
>      if (self.op.disk_template != constants.DT_EXT and
> -        AllDiskOfType(inst_disks, [self.op.disk_template])):
> +        utils.AllDiskOfType(inst_disks, [self.op.disk_template])):
>        raise errors.OpPrereqError("Instance already has disk template %s" %
>                                   self.instance.disk_template,
>                                   errors.ECODE_INVAL)
> @@ -608,12 +608,12 @@ class LUInstanceSetParams(LogicalUnit):
>      # TODO remove setting the disk template after DiskSetParams exists.
>      # node capacity checks
>      if (self.op.disk_template == constants.DT_PLAIN and
> -        AllDiskOfType(inst_disks, [constants.DT_DRBD8])):
> +        utils.AllDiskOfType(inst_disks, [constants.DT_DRBD8])):
>        # we ensure that no capacity checks will be made for conversions from
>        # the 'drbd' to the 'plain' disk template
>        pass
>      elif (self.op.disk_template == constants.DT_DRBD8 and
> -          AllDiskOfType(inst_disks, [constants.DT_PLAIN])):
> +          utils.AllDiskOfType(inst_disks, [constants.DT_PLAIN])):
>        # for conversions from the 'plain' to the 'drbd' disk template, check
>        # only the remote node's capacity
>        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, 
> self.disks_info)
> diff --git a/lib/cmdlib/instance_storage.py b/lib/cmdlib/instance_storage.py
> index 1489e0f..c446ecf 100644
> --- a/lib/cmdlib/instance_storage.py
> +++ b/lib/cmdlib/instance_storage.py
> @@ -52,8 +52,7 @@ from ganeti.cmdlib.common import INSTANCE_DOWN, 
> INSTANCE_NOT_RUNNING, \
>    CheckDiskTemplateEnabled
>  from ganeti.cmdlib.instance_utils import GetInstanceInfoText, \
>    CopyLockList, ReleaseLocks, CheckNodeVmCapable, \
> -  BuildInstanceHookEnvByObject, CheckNodeNotDrained, CheckTargetNodeIPolicy, 
> \
> -  AnyDiskOfType, AllDiskOfType
> +  BuildInstanceHookEnvByObject, CheckNodeNotDrained, CheckTargetNodeIPolicy
>
>  import ganeti.masterd.instance
>
> @@ -918,9 +917,9 @@ class LUInstanceRecreateDisks(LogicalUnit):
>                                      len(self.op.node_uuids)),
>                                     errors.ECODE_INVAL)
>        disks = self.cfg.GetInstanceDisks(instance.uuid)
> -      assert (not AnyDiskOfType(disks, [constants.DT_DRBD8]) or
> +      assert (not utils.AnyDiskOfType(disks, [constants.DT_DRBD8]) or
>                len(self.op.node_uuids) == 2)
> -      assert (not AnyDiskOfType(disks, [constants.DT_PLAIN]) or
> +      assert (not utils.AnyDiskOfType(disks, [constants.DT_PLAIN]) or
>                len(self.op.node_uuids) == 1)
>        primary_node = self.op.node_uuids[0]
>      else:
> @@ -2336,8 +2335,9 @@ class TLReplaceDisks(Tasklet):
>          self.disks = range(len(self.instance.disks))
>
>      disks = self.cfg.GetInstanceDisks(self.instance.uuid)
> -    if not AllDiskOfType(
> -        map(lambda i: disks[i], self.disks), [constants.DT_DRBD8]):
> +    if (not self.disks or
> +        not utils.AllDiskOfType(map(lambda i: disks[i], self.disks),
> +                                [constants.DT_DRBD8])):
>        raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
>                                   " instances", errors.ECODE_INVAL)
>
> diff --git a/lib/cmdlib/instance_utils.py b/lib/cmdlib/instance_utils.py
> index f59afc7..58a8bbb 100644
> --- a/lib/cmdlib/instance_utils.py
> +++ b/lib/cmdlib/instance_utils.py
> @@ -1219,57 +1219,3 @@ def ComputeNics(op, cluster, default_ip, cfg, ec_id):
>
>    return nics
>
> -
> -def AllDiskOfType(disks_info, dev_types):
> -  """Checks if the instance has only disks of any of the dev_types.
> -
> -  @type disks_info: list of L{Disk}
> -  @param disks_info: all the disks of the instance.
> -  @type dev_types: list of disk templates
> -  @param dev_types: the disk type required.
> -
> -  @rtype: bool
> -  @return: True iff the instance only has disks of type dev_type.
> -  """
> -
> -  if isinstance(dev_types, str):
> -    raise errors.ProgrammerError("AllDiskOfType called with disk "
> -                                 "template (%s) instead of list "
> -                                 "of disk templates." % dev_types)
> -
> -  if not disks_info and constants.DT_DISKLESS not in dev_types:
> -    return False
> -
> -  for disk in disks_info:
> -    if disk.dev_type not in dev_types:
> -      return False
> -
> -  return True
> -
> -
> -def AnyDiskOfType(disks_info, dev_types):
> -  """Checks if the instance has some disks of any types in dev_types.
> -
> -  @type disks_info: list of L{Disk}
> -  @param disks_info: all the disks of the instance.
> -  @type dev_types: list of disk template
> -  @param dev_types: the disk type required.
> -
> -  @rtype: bool
> -  @return: True if the instance has disks of type dev_types or the instance 
> has
> -    no disks and the dev_types allow DT_DISKLESS.
> -  """
> -
> -  if isinstance(dev_types, str):
> -    raise errors.ProgrammerError("AnyDiskOfType called with disk "
> -                                 "template (%s) instead of list "
> -                                 "of disk templates." % dev_types)
> -
> -  if not disks_info and constants.DT_DISKLESS in dev_types:
> -    return True
> -
> -  for disk in disks_info:
> -    if disk.dev_type in dev_types:
> -      return True
> -
> -  return False
> diff --git a/lib/cmdlib/node.py b/lib/cmdlib/node.py
> index fa85f92..ba57ae9 100644
> --- a/lib/cmdlib/node.py
> +++ b/lib/cmdlib/node.py
> @@ -54,7 +54,6 @@ from ganeti.cmdlib.common import CheckParamsNotGlobal, \
>    FindFaultyInstanceDisks, CheckStorageTypeEnabled, CreateNewClientCert, \
>    AddNodeCertToCandidateCerts, RemoveNodeCertFromCandidateCerts, \
>    EnsureKvmdOnNodes
> -from ganeti.cmdlib.instance_utils import AnyDiskOfType
>  from ganeti.ssh import GetSshPortMap
>
>
> @@ -542,7 +541,7 @@ class LUNodeSetParams(LogicalUnit):
>
>      """
>      disks = self.cfg.GetInstanceDisks(instance.disks)
> -    any_mirrored = AnyDiskOfType(disks, constants.DTS_INT_MIRROR)
> +    any_mirrored = utils.AnyDiskOfType(disks, constants.DTS_INT_MIRROR)
>      return (any_mirrored and
>              self.op.node_uuid in self.cfg.GetInstanceNodes(instance.uuid))
>
> diff --git a/lib/utils/__init__.py b/lib/utils/__init__.py
> index 17d517c..2505cba 100644
> --- a/lib/utils/__init__.py
> +++ b/lib/utils/__init__.py
> @@ -884,3 +884,52 @@ def ValidateDeviceNames(kind, container):
>                                     errors.ECODE_NOTUNIQUE)
>        else:
>          valid.append(name)
> +
> +
> +def AllDiskOfType(disks_info, dev_types):
> +  """Checks if the instance has only disks of any of the dev_types.
> +
> +  @type disks_info: list of L{Disk}
> +  @param disks_info: all the disks of the instance.
> +  @type dev_types: list of disk templates
> +  @param dev_types: the disk type required.
> +
> +  @rtype: bool
> +  @return: True iff the instance only has disks of type dev_type.
> +  """
> +
> +  assert not isinstance(dev_types, str)
> +
> +  if not disks_info and constants.DT_DISKLESS not in dev_types:
> +    return False
> +
> +  for disk in disks_info:
> +    if disk.dev_type not in dev_types:
> +      return False
> +
> +  return True
> +
> +
> +def AnyDiskOfType(disks_info, dev_types):
> +  """Checks if the instance has some disks of any types in dev_types.
> +
> +  @type disks_info: list of L{Disk}
> +  @param disks_info: all the disks of the instance.
> +  @type dev_types: list of disk template
> +  @param dev_types: the disk type required.
> +
> +  @rtype: bool
> +  @return: True if the instance has disks of type dev_types or the instance 
> has
> +    no disks and the dev_types allow DT_DISKLESS.
> +  """
> +
> +  assert not isinstance(dev_types, str)
> +
> +  if not disks_info and constants.DT_DISKLESS in dev_types:
> +    return True
> +
> +  for disk in disks_info:
> +    if disk.dev_type in dev_types:
> +      return True
> +
> +  return False
> --
> 2.1.0.rc2.206.gedb03e5
>

LGTM, thanks.

Michele

-- 
Google Germany GmbH
Dienerstr. 12
80331 München

Registergericht und -nummer: Hamburg, HRB 86891
Sitz der Gesellschaft: Hamburg
Geschäftsführer: Graham Law, Christine Elizabeth Flores

Reply via email to