Commit 1bee66f3 added assertions to ensure that only the necessary locks are kept while replacing disks. One of them verifies that the locks have been released by the end of the operation. Unfortunately the commit added this check as part of a “finally” branch, which also runs when an exception is thrown — in which case the locks may not have been released yet. The resulting assertion error could then mask the original error. Moving the check out of the “finally” branch fixes the issue.
Signed-off-by: Michael Hanselmann <[email protected]> --- lib/cmdlib.py | 21 +++++++++++---------- 1 files changed, 11 insertions(+), 10 deletions(-) diff --git a/lib/cmdlib.py b/lib/cmdlib.py index 9702d6a..0451126 100644 --- a/lib/cmdlib.py +++ b/lib/cmdlib.py @@ -8342,22 +8342,23 @@ class TLReplaceDisks(Tasklet): else: fn = self._ExecDrbd8DiskOnly - return fn(feedback_fn) - + result = fn(feedback_fn) finally: # Deactivate the instance disks if we're replacing them on a # down instance if activate_disks: _SafeShutdownInstanceDisks(self.lu, self.instance) - if __debug__: - # Verify owned locks - owned_locks = self.lu.context.glm.list_owned(locking.LEVEL_NODE) - assert ((self.early_release and not owned_locks) or - (not self.early_release and - set(owned_locks) == set(self.node_secondary_ip))), \ - ("Not owning the correct locks, early_release=%s, owned=%r" % - (self.early_release, owned_locks)) + if __debug__: + # Verify owned locks + owned_locks = self.lu.context.glm.list_owned(locking.LEVEL_NODE) + assert ((self.early_release and not owned_locks) or + (not self.early_release and + set(owned_locks) == set(self.node_secondary_ip))), \ + ("Not owning the correct locks, early_release=%s, owned=%r" % + (self.early_release, owned_locks)) + + return result def _CheckVolumeGroup(self, nodes): self.lu.LogInfo("Checking volume groups") -- 1.7.3.5
