Signed-off-by: Michael Hanselmann <[email protected]>
---
lib/cmdlib.py | 171 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++
lib/opcodes.py | 19 ++++++-
2 files changed, 188 insertions(+), 2 deletions(-)
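
Note on the argument rules (this text sits between the diffstat and the
patch proper, where git-apply skips it): the new opcode accepts either an
iallocator or an explicit new secondary node, but not both, and an explicit
node rules out evacuating primary instances. A minimal standalone sketch of
those rules follows; "validate_evac_args" is illustrative only and leaves
out the cluster-default iallocator fallback that _CheckIAllocatorOrNode
provides:

def validate_evac_args(nodes, iallocator, remote_node, primary):
  """Sketch of the checks done in CheckArguments/ExpandNames below."""
  if (iallocator is None) == (remote_node is None):
    raise ValueError("Need exactly one of iallocator and remote_node")
  if remote_node is not None:
    if remote_node in nodes:
      raise ValueError("Can not use evacuated node as a new secondary node")
    if primary:
      raise ValueError("Without the use of an iallocator only secondary"
                       " instances can be evacuated")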
diff --git a/lib/cmdlib.py b/lib/cmdlib.py
index 49f7eb5..fbec6a4 100644
--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -9760,6 +9760,177 @@ class LUNodeEvacStrategy(NoHooksLU):
     return result
 
 
+class LUNodeEvacuate(NoHooksLU):
+  """Evacuates instances off a list of nodes.
+
+  """
+  REQ_BGL = False
+
+  def CheckArguments(self):
+    _CheckIAllocatorOrNode(self, "iallocator", "remote_node")
+
+  def ExpandNames(self):
+    self.op.nodes = _GetWantedNodes(self, self.op.nodes)
+
+    if self.op.remote_node is not None:
+      self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
+
+    if self.op.remote_node is not None:
+      if self.op.remote_node in self.op.nodes:
+        raise errors.OpPrereqError("Can not use evacuated node as a new"
+                                   " secondary node", errors.ECODE_INVAL)
+
+      if self.op.primary:
+        raise errors.OpPrereqError("Without the use of an iallocator only"
+                                   " secondary instances can be evacuated",
+                                   errors.ECODE_INVAL)
+
+    # Declare locks
+    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
+    self.needed_locks = {
+      locking.LEVEL_INSTANCE: [],
+      locking.LEVEL_NODEGROUP: [],
+      locking.LEVEL_NODE: [],
+      }
+
+    # Determine nodes to be locked
+    self.lock_nodes = set(self.op.nodes)
+
+    if self.op.remote_node is None:
+      # Iallocator will choose any node(s) in the same groups
+      self.lock_nodes.update(self.cfg.GetNodeGroupMembersByNodes(self.op.nodes))
+    else:
+      self.lock_nodes.add(self.op.remote_node)
+
+  def _DetermineInstances(self):
+    """Builds list of instances to operate on.
+
+    """
+    if not self.op.primary:
+      # Secondary instances only
+      inst_fn = _GetNodeSecondaryInstances
+    elif not self.op.secondary:
+      # Primary instances only
+      inst_fn = _GetNodePrimaryInstances
+      assert self.op.remote_node is None, \
+        "Evacuating primary instances requires iallocator"
+    else:
+      # All instances
+      inst_fn = _GetNodeInstances
+
+    return itertools.chain(*(inst_fn(self.cfg, node)
+                             for node in self.op.nodes))
+
+  def DeclareLocks(self, level):
+    if level == locking.LEVEL_INSTANCE:
+      # Lock instances optimistically, needs verification once node and group
+      # locks have been acquired
+      self.needed_locks[locking.LEVEL_INSTANCE] = \
+        set(i.name for i in self._DetermineInstances())
+
+    elif level == locking.LEVEL_NODEGROUP:
+      # Lock node groups optimistically, needs verification once nodes have
+      # been acquired
+      self.needed_locks[locking.LEVEL_NODEGROUP] = \
+        self.cfg.GetNodeGroupsFromNodes(self.lock_nodes)
+
+    elif level == locking.LEVEL_NODE:
+      self.needed_locks[locking.LEVEL_NODE] = self.lock_nodes
+
+  def CheckPrereq(self):
+    # Verify locks
+    owned_instances = self.glm.list_owned(locking.LEVEL_INSTANCE)
+    owned_nodes = self.glm.list_owned(locking.LEVEL_NODE)
+    owned_groups = self.glm.list_owned(locking.LEVEL_NODEGROUP)
+
+    assert owned_nodes == self.lock_nodes
+
+    wanted_groups = self.cfg.GetNodeGroupsFromNodes(owned_nodes)
+    if owned_groups != wanted_groups:
+      raise errors.OpExecError("Nodes changed groups since locks were acquired,"
+                               " current groups are '%s', used to be '%s'" %
+                               (utils.CommaJoin(wanted_groups),
+                                utils.CommaJoin(owned_groups)))
+
+    # Determine affected instances; materialize, the iterator is reused below
+    self.instances = list(self._DetermineInstances())
+    self.instance_names = [i.name for i in self.instances]
+
+    if set(self.instance_names) != owned_instances:
+      raise errors.OpExecError("Instances on nodes '%s' changed since locks"
+                               " were acquired, current instances are '%s',"
+                               " used to be '%s'" %
+                               (utils.CommaJoin(self.op.nodes),
+                                utils.CommaJoin(self.instance_names),
+                                utils.CommaJoin(owned_instances)))
+
+    if self.instance_names:
+      self.LogInfo("Evacuating instances from node(s) %s: %s",
+                   utils.CommaJoin(utils.NiceSort(self.op.nodes)),
+                   utils.CommaJoin(utils.NiceSort(self.instance_names)))
+    else:
+      self.LogInfo("No instances to evacuate from node(s) %s",
+                   utils.CommaJoin(self.op.nodes))
+
+    if self.op.remote_node is not None:
+      for i in self.instances:
+        if i.primary_node == self.op.remote_node:
+          raise errors.OpPrereqError("Node %s is the primary node of"
+                                     " instance %s, cannot use it as"
+                                     " secondary" %
+                                     (self.op.remote_node, i.name),
+                                     errors.ECODE_INVAL)
+
+  def Exec(self, feedback_fn):
+    assert (self.op.iallocator is not None) ^ (self.op.remote_node is not None)
+
+    if not self.instance_names:
+      # No instances to evacuate
+      jobs = []
+
+    elif self.op.iallocator is not None:
+      # TODO: Implement relocation to other group
+      ial = IAllocator(self.cfg, self.rpc, constants.IALLOCATOR_MODE_MRELOC,
+                       reloc_mode=constants.IALLOCATOR_MRELOC_KEEP,
+                       instances=list(self.instance_names),
+                       target_groups=[])
+
+      ial.Run(self.op.iallocator)
+
+      if not ial.success:
+        raise errors.OpPrereqError("Can't compute node evacuation using"
+                                   " iallocator '%s': %s" %
+                                   (self.op.iallocator, ial.info),
+                                   errors.ECODE_NORES)
+
+      jobs = [[opcodes.OpCode.LoadOpCode(state) for state in jobset]
+              for jobset in ial.result]
+
+      # Set "early_release" flag on opcodes where available
+      early_release = self.op.early_release
+      for op in itertools.chain(*jobs):
+        try:
+          op.early_release = early_release
+        except AttributeError:
+          assert not isinstance(op, opcodes.OpInstanceReplaceDisks)
+
+    elif self.op.remote_node is not None:
+      assert not self.op.primary
+      jobs = [
+        [opcodes.OpInstanceReplaceDisks(instance_name=instance_name,
+                                        remote_node=self.op.remote_node,
+                                        disks=[],
+                                        mode=constants.REPLACE_DISK_CHG,
+                                        early_release=self.op.early_release)]
+        for instance_name in self.instance_names
+        ]
+
+    else:
+      raise errors.ProgrammerError("No iallocator or remote node")
+
+    return ResultWithJobs(jobs)
+
+
 class LUInstanceGrowDisk(LogicalUnit):
   """Grow a disk of an instance.
 
diff --git a/lib/opcodes.py b/lib/opcodes.py
index 6a5f3be..8839eec 100644
--- a/lib/opcodes.py
+++ b/lib/opcodes.py
@@ -112,6 +112,9 @@ _PGroupNodeParams = ("ndparams", None, ht.TMaybeDict,
 _PQueryWhat = ("what", ht.NoDefault, ht.TElemOf(constants.QR_VIA_OP),
                "Resource(s) to query for")
 
+_PEarlyRelease = ("early_release", False, ht.TBool,
+                  "Whether to release locks as soon as possible")
+
 _PIpCheckDoc = "Whether to ensure instance's IP address is inactive"
 
 #: Do not remember instance state changes
@@ -894,6 +897,19 @@ class OpNodeEvacStrategy(OpCode):
     ]
 
 
+class OpNodeEvacuate(OpCode):
+  """Evacuate instances off a number of nodes."""
+  OP_DSC_FIELD = "nodes"
+  OP_PARAMS = [
+    _PEarlyRelease,
+    ("nodes", ht.NoDefault, ht.TListOf(ht.TNonEmptyString), "Node names"),
+    ("remote_node", None, ht.TMaybeString, "New secondary node"),
+    ("iallocator", None, ht.TMaybeString, "Iallocator for computing solution"),
+    ("primary", True, ht.TBool, "Whether to evacuate primary instances"),
+    ("secondary", True, ht.TBool, "Whether to evacuate secondary instances"),
+    ]
+
+
 # instance opcodes
 
 class OpInstanceCreate(OpCode):
@@ -1044,6 +1060,7 @@ class OpInstanceReplaceDisks(OpCode):
   OP_DSC_FIELD = "instance_name"
   OP_PARAMS = [
     _PInstanceName,
+    _PEarlyRelease,
     ("mode", ht.NoDefault, ht.TElemOf(constants.REPLACE_MODES),
      "Replacement mode"),
     ("disks", ht.EmptyList, ht.TListOf(ht.TPositiveInt),
@@ -1051,8 +1068,6 @@ class OpInstanceReplaceDisks(OpCode):
("remote_node", None, ht.TMaybeString, "New secondary node"),
("iallocator", None, ht.TMaybeString,
"Iallocator for deciding new secondary node"),
- ("early_release", False, ht.TBool,
- "Whether to release locks as soon as possible"),
]
--
1.7.3.5
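
A possible usage sketch (illustrative, not part of the patch): submitting
the new opcode through the luxi interface and letting the master turn the
ResultWithJobs payload into per-instance jobs. The client API below is
assumed from this code base's conventions; job polling and error handling
are omitted.

from ganeti import luxi, opcodes

def evacuate_secondaries(nodes, iallocator_name):
  """Evacuate all secondary instances off the given nodes."""
  op = opcodes.OpNodeEvacuate(nodes=nodes,
                              iallocator=iallocator_name,
                              primary=False,  # secondary instances only
                              secondary=True,
                              early_release=True)
  cl = luxi.Client()
  # The result of this job is the list of job IDs submitted by the LU,
  # one per evacuated instance.
  return cl.SubmitJob([op])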