Interdiff:
diff --git a/lib/cmdlib/instance.py b/lib/cmdlib/instance.py
index 5f94775..b5dc099 100644
--- a/lib/cmdlib/instance.py
+++ b/lib/cmdlib/instance.py
@@ -56,7 +56,8 @@ from ganeti.cmdlib.common import INSTANCE_ONLINE, INSTANCE_DOWN, \
_IsExclusiveStorageEnabledNode, _CheckHVParams, _CheckOSParams, \
_GetWantedInstances, _CheckInstancesNodeGroups, _AnnotateDiskParams, \
_GetUpdatedParams, _ExpandInstanceName, _FindFaultyInstanceDisks, \
- _ComputeIPolicySpecViolation, _CheckInstanceState, _ExpandNodeName
+ _ComputeIPolicySpecViolation, _ComputeIPolicyInstanceViolation, \
+ _CheckInstanceState, _ExpandNodeName
from ganeti.cmdlib.instance_utils import _AssembleInstanceDisks, \
_BuildInstanceHookEnvByObject, _GetClusterDomainSecret, \
  _BuildInstanceHookEnv, _NICListToTuple, _NICToTuple, _CheckNodeNotDrained, \
@@ -1190,7 +1191,7 @@ class LUInstanceCreate(LogicalUnit):
if self.op.no_install and self.op.start:
self.LogInfo("No-installation mode selected, disabling startup")
self.op.start = False
- # validate/normalize the instance name
+ # validate/normalize the instance name
self.op.instance_name = \
netutils.Hostname.GetNormalizedName(self.op.instance_name)
@@ -1202,7 +1203,7 @@ class LUInstanceCreate(LogicalUnit):
# check nics' parameter names
for nic in self.op.nics:
utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
- # check that NIC's parameters names are unique and valid
+ # check that NIC's parameters names are unique and valid
utils.ValidateDeviceNames("NIC", self.op.nics)
# check that disk's names are unique and valid
@@ -1260,7 +1261,7 @@ class LUInstanceCreate(LogicalUnit):
# file storage checks
if (self.op.file_driver and
- not self.op.file_driver in constants.FILE_DRIVER):
+ not self.op.file_driver in constants.FILE_DRIVER):
raise errors.OpPrereqError("Invalid file driver name '%s'" %
self.op.file_driver, errors.ECODE_INVAL)
@@ -1599,7 +1600,7 @@ class LUInstanceCreate(LogicalUnit):
self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()
if (self.op.hypervisor is None and
- einfo.has_option(constants.INISECT_INS, "hypervisor")):
+ einfo.has_option(constants.INISECT_INS, "hypervisor")):
self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")
if einfo.has_section(constants.INISECT_HYP):
@@ -1614,7 +1615,7 @@ class LUInstanceCreate(LogicalUnit):
for name, value in einfo.items(constants.INISECT_BEP):
if name not in self.op.beparams:
self.op.beparams[name] = value
- # Compatibility for the old "memory" be param
+ # Compatibility for the old "memory" be param
if name == constants.BE_MEMORY:
if constants.BE_MAXMEM not in self.op.beparams:
self.op.beparams[constants.BE_MAXMEM] = value
@@ -1624,7 +1625,7 @@ class LUInstanceCreate(LogicalUnit):
# try to read the parameters old style, from the main section
for name in constants.BES_PARAMETERS:
if (name not in self.op.beparams and
- einfo.has_option(constants.INISECT_INS, name)):
+ einfo.has_option(constants.INISECT_INS, name)):
self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)
if einfo.has_section(constants.INISECT_OSP):
@@ -1642,18 +1643,18 @@ class LUInstanceCreate(LogicalUnit):
for name in self.op.hvparams.keys():
if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
del self.op.hvparams[name]
- # beparams
+ # beparams
be_defs = cluster.SimpleFillBE({})
for name in self.op.beparams.keys():
if name in be_defs and be_defs[name] == self.op.beparams[name]:
del self.op.beparams[name]
- # nic params
+ # nic params
nic_defs = cluster.SimpleFillNIC({})
for nic in self.op.nics:
for name in constants.NICS_PARAMETERS:
if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
del nic[name]
- # osparams
+ # osparams
os_defs = cluster.SimpleFillOS(self.op.os_type, {})
for name in self.op.osparams.keys():
if name in os_defs and os_defs[name] == self.op.osparams[name]:
@@ -1702,12 +1703,12 @@ class LUInstanceCreate(LogicalUnit):
self._old_instance_name = None
if (not self.cfg.GetVGName() and
- self.op.disk_template not in constants.DTS_NOT_LVM):
+ self.op.disk_template not in constants.DTS_NOT_LVM):
raise errors.OpPrereqError("Cluster does not support lvm-based"
" instances", errors.ECODE_STATE)
if (self.op.hypervisor is None or
- self.op.hypervisor == constants.VALUE_AUTO):
+ self.op.hypervisor == constants.VALUE_AUTO):
self.op.hypervisor = self.cfg.GetHypervisorType()
cluster = self.cfg.GetClusterInfo()
@@ -1941,7 +1942,7 @@ class LUInstanceCreate(LogicalUnit):
raise errors.OpPrereqError("Online logical volumes found, cannot"
" adopt: %s" %
utils.CommaJoin(online_lvs),
errors.ECODE_STATE)
- # update the size of disk based on what is found
+ # update the size of disk based on what is found
for dsk in self.disks:
dsk[constants.IDISK_SIZE] = \
int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
@@ -2318,7 +2319,7 @@ class LUInstanceRename(LogicalUnit):
hostname = _CheckHostnameSane(self, new_name)
new_name = self.op.new_name = hostname.name
if (self.op.ip_check and
- netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
+ netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
raise errors.OpPrereqError("IP %s of instance %s already in use" %
(hostname.ip, new_name),
errors.ECODE_NOTUNIQUE)
@@ -2337,7 +2338,7 @@ class LUInstanceRename(LogicalUnit):
rename_file_storage = False
if (inst.disk_template in constants.DTS_FILEBASED and
- self.op.new_name != inst.name):
+ self.op.new_name != inst.name):
old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
rename_file_storage = True
@@ -2475,6 +2476,27 @@ def _CheckInstanceBridgesExist(lu, instance, node=None):
_CheckNicsBridgesExist(lu, instance.nics, node)
+def _ComputeIPolicyNodeViolation(ipolicy, instance, current_group,
+ target_group, cfg,
+                                 _compute_fn=_ComputeIPolicyInstanceViolation):
+ """Compute if instance meets the specs of the new target group.
+
+ @param ipolicy: The ipolicy to verify
+ @param instance: The instance object to verify
+ @param current_group: The current group of the instance
+ @param target_group: The new group of the instance
+ @type cfg: L{config.ConfigWriter}
+ @param cfg: Cluster configuration
+ @param _compute_fn: The function to verify ipolicy (unittest only)
+ @see: L{_ComputeIPolicySpecViolation}
+
+ """
+ if current_group == target_group:
+ return []
+ else:
+ return _compute_fn(ipolicy, instance, cfg)
+
+
def _CheckTargetNodeIPolicy(lu, ipolicy, instance, node, cfg, ignore=False,
_compute_fn=_ComputeIPolicyNodeViolation):
"""Checks that the target node is correct in terms of instance policy.
@@ -3344,7 +3366,7 @@ class LUInstanceRecreateDisks(LogicalUnit):
errors.ECODE_INVAL)
if ((self.op.nodes or self.op.iallocator) and
- sorted(self.disks.keys()) != range(len(instance.disks))):
+ sorted(self.disks.keys()) != range(len(instance.disks))):
raise errors.OpPrereqError("Can't recreate disks partially and"
" change the nodes at the same time",
errors.ECODE_INVAL)
@@ -3385,7 +3407,7 @@ class LUInstanceRecreateDisks(LogicalUnit):
# need to update the nodes and minors
assert len(self.op.nodes) == 2
assert len(disk.logical_id) == 6 # otherwise disk internals
- # have changed
+ # have changed
(_, _, old_port, _, _, old_secret) = disk.logical_id
new_minors = self.cfg.AllocateDRBDMinor(self.op.nodes,
instance.name)
new_id = (self.op.nodes[0], self.op.nodes[1], old_port,
@@ -4104,7 +4126,7 @@ class LUInstanceReinstall(LogicalUnit):
assert instance is not None, \
"Cannot retrieve locked instance %s" % self.op.instance_name
_CheckNodeOnline(self, instance.primary_node, "Instance primary node"
-                      " offline, cannot reinstall")
+ " offline, cannot reinstall")
if instance.disk_template == constants.DT_DISKLESS:
raise errors.OpPrereqError("Instance '%s' has no disks" %
diff --git a/lib/cmdlib/instance_utils.py b/lib/cmdlib/instance_utils.py
index adae1cb..8718af3 100644
--- a/lib/cmdlib/instance_utils.py
+++ b/lib/cmdlib/instance_utils.py
@@ -238,7 +238,7 @@ def _ShutdownInstanceDisks(lu, instance, disks=None, ignore_primary=False):
lu.LogWarning("Could not shutdown block device %s on node %s: %s",
disk.iv_name, node, msg)
if ((node == instance.primary_node and not ignore_primary) or
- (node != instance.primary_node and not result.offline)):
+ (node != instance.primary_node and not result.offline)):
all_result = False
return all_result