Some lock managers associate state with leases, allowing a process to temporarily release its leases, and re-acquire them later, safe in the knowledge that no other process has acquired + released the leases in between.
This is already used between suspend/resume operations, and must also be used across migration. This passes the lockstate in the migration cookie. If the lock manager uses lockstate, then it becomes compulsory to use the migration v3 protocol to get the cookie support.

* src/qemu/qemu_driver.c: Validate that migration v2 protocol is not used
  if lock manager needs state transfer
* src/qemu/qemu_migration.c: Transfer lock state in migration cookie XML
---
 src/qemu/qemu_driver.c    |   27 ++++++++-
 src/qemu/qemu_migration.c |  136 +++++++++++++++++++++++++++++++++++++++++---
 2 files changed, 151 insertions(+), 12 deletions(-)

diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 112237a..fd74283 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -5848,6 +5848,8 @@ qemudDomainMigratePrepareTunnel(virConnectPtr dconn,
                   VIR_MIGRATE_NON_SHARED_DISK |
                   VIR_MIGRATE_NON_SHARED_INC, -1);
 
+    qemuDriverLock(driver);
+
     if (!dom_xml) {
         qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                         _("no domain XML passed"));
@@ -5864,13 +5866,19 @@ qemudDomainMigratePrepareTunnel(virConnectPtr dconn,
         goto cleanup;
     }
 
-    qemuDriverLock(driver);
+    if (virLockManagerPluginUsesState(driver->lockManager)) {
+        qemuReportError(VIR_ERR_INTERNAL_ERROR,
+                        _("Cannot use migrate v2 protocol with lock manager %s"),
+                        virLockManagerPluginGetName(driver->lockManager));
+        goto cleanup;
+    }
+
     ret = qemuMigrationPrepareTunnel(driver, dconn,
                                      NULL, 0, NULL, NULL, /* No cookies in v2 */
                                      st, dname, dom_xml);
-    qemuDriverUnlock(driver);
 
 cleanup:
+    qemuDriverUnlock(driver);
     return ret;
 }
 
@@ -5904,6 +5912,14 @@ qemudDomainMigratePrepare2 (virConnectPtr dconn,
     *uri_out = NULL;
 
     qemuDriverLock(driver);
+
+    if (virLockManagerPluginUsesState(driver->lockManager)) {
+        qemuReportError(VIR_ERR_INTERNAL_ERROR,
+                        _("Cannot use migrate v2 protocol with lock manager %s"),
+                        virLockManagerPluginGetName(driver->lockManager));
+        goto cleanup;
+    }
+
     if (flags & VIR_MIGRATE_TUNNELLED) {
         /* this is a logical error; we never should have gotten here with
          * VIR_MIGRATE_TUNNELLED set
@@ -5959,6 +5975,13 @@ qemudDomainMigratePerform (virDomainPtr dom,
                   VIR_MIGRATE_NON_SHARED_INC, -1);
 
     qemuDriverLock(driver);
+    if (virLockManagerPluginUsesState(driver->lockManager)) {
+        qemuReportError(VIR_ERR_INTERNAL_ERROR,
+                        _("Cannot use migrate v2 protocol with lock manager %s"),
+                        virLockManagerPluginGetName(driver->lockManager));
+        goto cleanup;
+    }
+
     vm = virDomainFindByUUID(&driver->domains, dom->uuid);
     if (!vm) {
         char uuidstr[VIR_UUID_STRING_BUFLEN];
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index 8c447b4..291d7e5 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -41,6 +41,7 @@
 #include "datatypes.h"
 #include "fdstream.h"
 #include "uuid.h"
+#include "locking/domain_lock.h"
 
 #define VIR_FROM_THIS VIR_FROM_QEMU
 
@@ -49,6 +50,7 @@ enum qemuMigrationCookieFlags {
     QEMU_MIGRATION_COOKIE_FLAG_GRAPHICS,
+    QEMU_MIGRATION_COOKIE_FLAG_LOCKSTATE,
 
     QEMU_MIGRATION_COOKIE_FLAG_LAST
 };
 
@@ -56,10 +58,11 @@ enum qemuMigrationCookieFlags {
 VIR_ENUM_DECL(qemuMigrationCookieFlag);
 VIR_ENUM_IMPL(qemuMigrationCookieFlag,
               QEMU_MIGRATION_COOKIE_FLAG_LAST,
-              "graphics");
+              "graphics", "lockstate");
 
 enum qemuMigrationCookieFeatures {
     QEMU_MIGRATION_COOKIE_GRAPHICS = (1 << QEMU_MIGRATION_COOKIE_FLAG_GRAPHICS),
+    QEMU_MIGRATION_COOKIE_LOCKSTATE = (1 << QEMU_MIGRATION_COOKIE_FLAG_LOCKSTATE),
 };
 
 typedef struct _qemuMigrationCookieGraphics qemuMigrationCookieGraphics;
@@ -88,6 +91,10 @@ struct _qemuMigrationCookie {
     unsigned char uuid[VIR_UUID_BUFLEN];
     char *name;
 
+    /* If (flags & QEMU_MIGRATION_COOKIE_LOCKSTATE) */
+    char *lockState;
+    char *lockDriver;
+
     /* If (flags & QEMU_MIGRATION_COOKIE_GRAPHICS) */
     qemuMigrationCookieGraphicsPtr graphics;
 };
@@ -113,6 +120,8 @@ static void qemuMigrationCookieFree(qemuMigrationCookiePtr mig)
     VIR_FREE(mig->localHostname);
     VIR_FREE(mig->remoteHostname);
     VIR_FREE(mig->name);
+    VIR_FREE(mig->lockState);
+    VIR_FREE(mig->lockDriver);
     VIR_FREE(mig);
 }
 
@@ -278,6 +287,41 @@ qemuMigrationCookieAddGraphics(qemuMigrationCookiePtr mig,
 }
 
 
+static int
+qemuMigrationCookieAddLockstate(qemuMigrationCookiePtr mig,
+                                struct qemud_driver *driver,
+                                virDomainObjPtr dom)
+{
+    qemuDomainObjPrivatePtr priv = dom->privateData;
+
+    if (mig->flags & QEMU_MIGRATION_COOKIE_LOCKSTATE) {
+        qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+                        _("Migration lockstate data already present"));
+        return -1;
+    }
+
+    if (virDomainObjGetState(dom, NULL) == VIR_DOMAIN_PAUSED) {
+        if (priv->lockState &&
+            !(mig->lockState = strdup(priv->lockState)))
+            return -1;
+    } else {
+        if (virDomainLockProcessInquire(driver->lockManager, dom, &mig->lockState) < 0)
+            return -1;
+    }
+
+    if (!(mig->lockDriver = strdup(virLockManagerPluginGetName(driver->lockManager)))) {
+        VIR_FREE(mig->lockState);
+        return -1;
+    }
+
+    mig->flags |= QEMU_MIGRATION_COOKIE_LOCKSTATE;
+    mig->flagsMandatory |= QEMU_MIGRATION_COOKIE_LOCKSTATE;
+
+    return 0;
+}
+
+
+
 static void qemuMigrationCookieGraphicsXMLFormat(virBufferPtr buf,
                                                  qemuMigrationCookieGraphicsPtr grap)
 {
@@ -322,6 +366,15 @@ static void qemuMigrationCookieXMLFormat(virBufferPtr buf,
         mig->graphics)
         qemuMigrationCookieGraphicsXMLFormat(buf, mig->graphics);
 
+    if ((mig->flags & QEMU_MIGRATION_COOKIE_LOCKSTATE) &&
+        mig->lockState) {
+        virBufferAsprintf(buf, "  <lockstate driver='%s'>\n",
+                          mig->lockDriver);
+        virBufferAsprintf(buf, "    <leases>%s</leases>\n",
+                          mig->lockState);
+        virBufferAddLit(buf, "  </lockstate>\n");
+    }
+
     virBufferAddLit(buf, "</qemu-migration>\n");
 }
 
@@ -504,6 +557,19 @@ qemuMigrationCookieXMLParse(qemuMigrationCookiePtr mig,
         (!(mig->graphics = qemuMigrationCookieGraphicsXMLParse(ctxt))))
         goto error;
 
+    if ((flags & QEMU_MIGRATION_COOKIE_LOCKSTATE) &&
+        virXPathBoolean("count(./lockstate) > 0", ctxt)) {
+        mig->lockDriver = virXPathString("string(./lockstate[1]/@driver)", ctxt);
+        if (!mig->lockDriver) {
+            qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+                            _("Missing lock driver name in migration cookie"));
+            goto error;
+        }
+        mig->lockState = virXPathString("string(./lockstate[1]/leases[1])", ctxt);
+        if (mig->lockState && STREQ(mig->lockState, ""))
+            VIR_FREE(mig->lockState);
+    }
+
     return 0;
 
 error:
@@ -564,6 +630,10 @@ qemuMigrationBakeCookie(qemuMigrationCookiePtr mig,
         qemuMigrationCookieAddGraphics(mig, driver, dom) < 0)
         return -1;
 
+    if (flags & QEMU_MIGRATION_COOKIE_LOCKSTATE &&
+        qemuMigrationCookieAddLockstate(mig, driver, dom) < 0)
+        return -1;
+
     if (!(*cookieout = qemuMigrationCookieXMLFormatStr(mig)))
         return -1;
 
@@ -576,7 +646,8 @@ qemuMigrationBakeCookie(qemuMigrationCookiePtr mig,
 
 
 static qemuMigrationCookiePtr
-qemuMigrationEatCookie(virDomainObjPtr dom,
+qemuMigrationEatCookie(struct qemud_driver *driver,
+                       virDomainObjPtr dom,
                        const char *cookiein,
                        int cookieinlen,
                        int flags)
@@ -602,6 +673,24 @@ qemuMigrationEatCookie(virDomainObjPtr dom,
                                     flags) < 0)
         goto error;
 
+    if (mig->flags & QEMU_MIGRATION_COOKIE_LOCKSTATE) {
+        if (!mig->lockDriver) {
+            if (virLockManagerPluginUsesState(driver->lockManager)) {
+                qemuReportError(VIR_ERR_INTERNAL_ERROR,
+                                _("Missing %s lock state for migration cookie"),
+                                virLockManagerPluginGetName(driver->lockManager));
+                goto error;
+            }
+        } else if (STRNEQ(mig->lockDriver,
+                          virLockManagerPluginGetName(driver->lockManager))) {
+            qemuReportError(VIR_ERR_INTERNAL_ERROR,
+                            _("Source host lock driver %s different from target %s"),
+                            mig->lockDriver,
+                            virLockManagerPluginGetName(driver->lockManager));
+            goto error;
+        }
+    }
+
     return mig;
 
 error:
@@ -893,12 +982,12 @@ char *qemuMigrationBegin(struct qemud_driver *driver,
     if (!qemuMigrationIsAllowed(vm->def))
         goto cleanup;
 
-    if (!(mig = qemuMigrationEatCookie(vm, NULL, 0, 0)))
+    if (!(mig = qemuMigrationEatCookie(driver, vm, NULL, 0, 0)))
        goto cleanup;
 
     if (qemuMigrationBakeCookie(mig, driver, vm,
                                 cookieout, cookieoutlen,
-                                0) < 0)
+                                QEMU_MIGRATION_COOKIE_LOCKSTATE) < 0)
         goto cleanup;
 
     rv = qemuDomainFormatXML(driver, vm,
@@ -976,7 +1065,8 @@ qemuMigrationPrepareTunnel(struct qemud_driver *driver,
     def = NULL;
     priv = vm->privateData;
 
-    if (!(mig = qemuMigrationEatCookie(vm, cookiein, cookieinlen, 0)))
+    if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen,
+                                       QEMU_MIGRATION_COOKIE_LOCKSTATE)))
         goto cleanup;
 
     if (qemuDomainObjBeginJobWithDriver(driver, vm) < 0)
@@ -1211,7 +1301,8 @@ qemuMigrationPrepareDirect(struct qemud_driver *driver,
     def = NULL;
     priv = vm->privateData;
 
-    if (!(mig = qemuMigrationEatCookie(vm, cookiein, cookieinlen, 0)))
+    if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen,
+                                       QEMU_MIGRATION_COOKIE_LOCKSTATE)))
         goto cleanup;
 
     if (qemuDomainObjBeginJobWithDriver(driver, vm) < 0)
@@ -1239,6 +1330,15 @@ qemuMigrationPrepareDirect(struct qemud_driver *driver,
         goto endjob;
     }
 
+    if (mig->lockState) {
+        VIR_DEBUG("Received lockstate %s", mig->lockState);
+        VIR_FREE(priv->lockState);
+        priv->lockState = mig->lockState;
+        mig->lockState = NULL;
+    } else {
+        VIR_DEBUG("Received no lockstate");
+    }
+
     if (qemuMigrationBakeCookie(mig, driver, vm, cookieout, cookieoutlen,
                                 QEMU_MIGRATION_COOKIE_GRAPHICS) < 0) {
         /* We could tear down the whole guest here, but
@@ -1309,7 +1409,15 @@ static int doNativeMigrate(struct qemud_driver *driver,
               driver, vm, uri, NULLSTR(cookiein), cookieinlen,
              cookieout, cookieoutlen, flags, dname, resource);
 
-    if (!(mig = qemuMigrationEatCookie(vm, cookiein, cookieinlen,
+    if (virLockManagerPluginUsesState(driver->lockManager) &&
+        !cookieout) {
+        qemuReportError(VIR_ERR_INTERNAL_ERROR,
+                        _("Migration with lock driver %s requires cookie support"),
+                        virLockManagerPluginGetName(driver->lockManager));
+        return -1;
+    }
+
+    if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen,
                                        QEMU_MIGRATION_COOKIE_GRAPHICS)))
         goto cleanup;
 
@@ -1506,6 +1614,14 @@ static int doTunnelMigrate(struct qemud_driver *driver,
              driver, vm, st, NULLSTR(cookiein), cookieinlen,
             cookieout, cookieoutlen, flags, resource);
 
+    if (virLockManagerPluginUsesState(driver->lockManager) &&
+        !cookieout) {
+        qemuReportError(VIR_ERR_INTERNAL_ERROR,
+                        _("Migration with lock driver %s requires cookie support"),
+                        virLockManagerPluginGetName(driver->lockManager));
+        return -1;
+    }
+
     if (!qemuCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_QEMU_UNIX) &&
         !qemuCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_QEMU_EXEC)) {
         qemuReportError(VIR_ERR_OPERATION_FAILED,
@@ -1565,7 +1681,7 @@ static int doTunnelMigrate(struct qemud_driver *driver,
         goto cleanup;
     }
 
-    if (!(mig = qemuMigrationEatCookie(vm, cookiein, cookieinlen,
+    if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen,
                                        QEMU_MIGRATION_COOKIE_GRAPHICS)))
         goto cleanup;
 
@@ -2265,7 +2381,7 @@ qemuMigrationFinish(struct qemud_driver *driver,
     priv->jobActive = QEMU_JOB_NONE;
     memset(&priv->jobInfo, 0, sizeof(priv->jobInfo));
 
-    if (!(mig = qemuMigrationEatCookie(vm, cookiein, cookieinlen, 0)))
+    if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen, 0)))
         goto cleanup;
 
     if (qemuDomainObjBeginJobWithDriver(driver, vm) < 0)
@@ -2420,7 +2536,7 @@ int qemuMigrationConfirm(struct qemud_driver *driver,
              driver, conn, vm, NULLSTR(cookiein), cookieinlen,
             flags, retcode);
 
-    if (!(mig = qemuMigrationEatCookie(vm, cookiein, cookieinlen, 0)))
+    if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen, 0)))
         return -1;
 
     if (!virDomainObjIsActive(vm)) {
-- 
1.7.4.4
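
For reference, a sketch of how the new element sits inside the <qemu-migration> cookie document that qemuMigrationCookieXMLFormat emits and qemuMigrationCookieXMLParse consumes. The driver name and lease payload below are illustrative placeholders only (sanlock is named merely as an example of a plugin that tracks lease state); the real <leases> content is whatever opaque string virDomainLockProcessInquire hands back, and the QEMU driver never interprets it:

  <qemu-migration>
    ...
    <lockstate driver='sanlock'>
      <leases>...opaque state string returned by the lock plugin...</leases>
    </lockstate>
  </qemu-migration>

On the destination, qemuMigrationPrepareDirect copies the parsed string into priv->lockState, so the configured lock plugin can later re-acquire the same leases on the target host.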