When libvirtd is restarted during migration, we properly cancel the
ongoing migration (unless it managed to almost finish before the
restart). But if we were also migrating storage using NBD, we would
completely forget about the running disk mirrors.
Signed-off-by: Jiri Denemark <jdene...@redhat.com>
---
Notes:
Version 2:
- make use of qemuMonitorGetAllBlockJobInfo introduced by the previous patch
- undo qemuBlockJobSyncBegin in case of error
src/qemu/qemu_domain.c| 45 -
src/qemu/qemu_migration.c | 85 +++
src/qemu/qemu_migration.h | 3 ++
src/qemu/qemu_process.c | 8 +
4 files changed, 133 insertions(+), 8 deletions(-)
diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
index f6c9add..8cb4daa 100644
--- a/src/qemu/qemu_domain.c
+++ b/src/qemu/qemu_domain.c
@@ -578,7 +578,27 @@ qemuDomainObjPrivateXMLFormat(virBufferPtr buf,
                               qemuDomainAsyncJobPhaseToString(
                                   priv->job.asyncJob, priv->job.phase));
     }
-    virBufferAddLit(buf, "/>\n");
+    if (priv->job.asyncJob != QEMU_ASYNC_JOB_MIGRATION_OUT) {
+        virBufferAddLit(buf, "/>\n");
+    } else {
+        size_t i;
+        virDomainDiskDefPtr disk;
+        qemuDomainDiskPrivatePtr diskPriv;
+
+        virBufferAddLit(buf, ">\n");
+        virBufferAdjustIndent(buf, 2);
+
+        for (i = 0; i < vm->def->ndisks; i++) {
+            disk = vm->def->disks[i];
+            diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk);
+            virBufferAsprintf(buf, "<disk dev='%s' migrating='%s'/>\n",
+                              disk->dst,
+                              diskPriv->migrating ? "yes" : "no");
+        }
+
+        virBufferAdjustIndent(buf, -2);
+        virBufferAddLit(buf, "</job>\n");
+    }
 }

 priv->job.active = job;
@@ -736,6 +756,29 @@ qemuDomainObjPrivateXMLParse(xmlXPathContextPtr ctxt,
         }
     }

+    if ((n = virXPathNodeSet("./job[1]/disk[@migrating='yes']",
+                             ctxt, &nodes)) < 0) {
+        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+                       _("failed to parse list of disks marked for "
+                         "migration"));
+        goto error;
+    }
+    if (n > 0) {
+        if (priv->job.asyncJob != QEMU_ASYNC_JOB_MIGRATION_OUT) {
+            VIR_WARN("Found disks marked for migration but we were not "
+                     "migrating");
+            n = 0;
+        }
+        for (i = 0; i < n; i++) {
+            char *dst = virXMLPropString(nodes[i], "dev");
+            virDomainDiskDefPtr disk;
+
+            if (dst && (disk = virDomainDiskByName(vm->def, dst, false)))
+                QEMU_DOMAIN_DISK_PRIVATE(disk)->migrating = true;
+            VIR_FREE(dst);
+        }
+    }
+    VIR_FREE(nodes);
+
     priv->fakeReboot = virXPathBoolean("boolean(./fakereboot)", ctxt) == 1;

     if ((n = virXPathNodeSet("./devices/device", ctxt, &nodes)) < 0) {
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index fedadd7..f6b9aa0 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -1985,6 +1985,7 @@ qemuMigrationDriveMirror(virQEMUDriverPtr driver,
     char *hoststr = NULL;
     unsigned int mirror_flags = VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT;
     int rv;
+    virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);

     VIR_DEBUG("Starting drive mirrors for domain %s", vm->def->name);
@@ -2034,6 +2035,11 @@ qemuMigrationDriveMirror(virQEMUDriverPtr driver,
             goto cleanup;
         }
         diskPriv->migrating = true;
+
+        if (virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm) < 0) {
+            VIR_WARN("Failed to save status on vm %s", vm->def->name);
+            goto cleanup;
+        }
     }

     while ((rv = qemuMigrationDriveMirrorReady(driver, vm)) != 1) {
@@ -2061,6 +2067,7 @@ qemuMigrationDriveMirror(virQEMUDriverPtr driver,
     ret = 0;

  cleanup:
+    virObjectUnref(cfg);
     VIR_FREE(diskAlias);
     VIR_FREE(nbd_dest);
     VIR_FREE(hoststr);
@@ -5683,6 +5690,84 @@ qemuMigrationToFile(virQEMUDriverPtr driver, virDomainObjPtr vm,
     return ret;
 }

+
+int
+qemuMigrationCancel(virQEMUDriverPtr driver,
+                    virDomainObjPtr vm)
+{
+    qemuDomainObjPrivatePtr priv = vm->privateData;
+    virHashTablePtr blockJobs = NULL;
+    bool storage = false;
+    size_t i;
+    int ret = -1;
+
+    VIR_DEBUG("Canceling unfinished outgoing migration of domain %s",
+              vm->def->name);
+
+    for (i = 0; i < vm->def->ndisks; i++) {
+        virDomainDiskDefPtr disk = vm->def->disks[i];
+        if (QEMU_DOMAIN_DISK_PRIVATE(disk)->migrating) {
+            qemuBlockJobSyncBegin(disk);
+            storage = true;
+        }
+    }
+
+    qemuDomainObjEnterMonitor(driver, vm);
+
+    ignore_value(qemuMonitorMigrateCancel(priv->mon));
+    if (storage)
+        blockJobs = qemuMonitorGetAllBlockJobInfo(priv->mon);
+
+    if