On 02/21/2018 01:11 PM, Michal Privoznik wrote:
> Surprisingly, nothing special is happening here. If we are the
> first to use the managed helper then spawn it. If not, we're
> almost done.
But wasn't there a very special reason to start it between fork and exec?
Does that still hold true? That is, why can we start the daemon after exec
now, when we couldn't in the previous patch?

It feels quite "disjoint" to have the unplug in a subsequent patch too.
Considering them both in one just seems "better", but it's not a deal
breaker.

>
> Signed-off-by: Michal Privoznik <mpriv...@redhat.com>
> ---
>  src/qemu/qemu_hotplug.c | 72 +++++++++++++++++++++++++++++++++++++++++++++++++
>  src/qemu/qemu_process.c | 38 +++++++++++++++++++++-----
>  src/qemu/qemu_process.h |  7 +++++
>  3 files changed, 110 insertions(+), 7 deletions(-)
>
> diff --git a/src/qemu/qemu_hotplug.c b/src/qemu/qemu_hotplug.c
> index f28006e3c..2ebb68fbc 100644
> --- a/src/qemu/qemu_hotplug.c
> +++ b/src/qemu/qemu_hotplug.c
> @@ -348,6 +348,58 @@ qemuDomainChangeEjectableMedia(virQEMUDriverPtr driver,
>  }
>
>
> +static int
> +qemuBuildPRDefInfoProps(virDomainObjPtr vm,

qemuBuild? Why not qemuDomainAdd?

> +                        virDomainDiskDefPtr disk,
> +                        virJSONValuePtr *prmgrProps,
> +                        const char **prAlias,
> +                        const char **prPath)
> +{
> +    qemuDomainObjPrivatePtr priv = vm->privateData;
> +    qemuDomainStorageSourcePrivatePtr srcPriv;
> +    virJSONValuePtr props = NULL;
> +    int ret = -1;
> +
> +    srcPriv = QEMU_DOMAIN_STORAGE_SOURCE_PRIVATE(disk->src);

ohh, umm, in qemuDomainAttachDiskGeneric there's a:

    srcPriv = QEMU_DOMAIN_STORAGE_SOURCE_PRIVATE(disk->src);
    if (srcPriv) {
        secinfo = srcPriv->secinfo;
        encinfo = srcPriv->encinfo;
    }

which makes it dawn on me that it "was" possible for srcPriv == NULL, and
the "thought" is that the subsequent deref is going to fail rather
spectacularly. See commit '8056721cb' (and a couple of others).

Although it seems patch 4 and your change to qemuDomainPrepareDiskSource -
calling qemuDomainStorageSourcePrivateNew there instead of having it called
in qemuDomainSecretStorageSourcePrepare - ensure that disk source
privateData is always allocated now, meaning a number of other places where
srcPriv is/was checked are no longer necessary. Maybe that one change needs
to be "extracted" out to make things obvious. Not required, but just
thinking and typing thoughts.

> +
> +    *prmgrProps = NULL;
> +
> +    if (priv->prPid != (pid_t) -1 ||
> +        !srcPriv->prd ||
> +        !srcPriv->prd->alias)
> +        return 0;
> +
> +    if (virJSONValueObjectCreate(&props,
> +                                 "s:path", srcPriv->prd->path,
> +                                 NULL) < 0)
> +        goto cleanup;
> +
> +    if (qemuProcessSetupOnePRDaemon(vm, disk) < 0)
> +        goto cleanup;
> +
> +    *prAlias = srcPriv->prd->alias;
> +    *prPath = srcPriv->prd->path;
> +    *prmgrProps = props;
> +    props = NULL;
> +    ret = 0;
> + cleanup:
> +    virJSONValueFree(props);
> +    return ret;
> +}
> +
> +
> +static void
> +qemuDestroyPRDefObject(virDomainObjPtr vm,

qemuDestroy? Why not qemuDomainDel?

> +                       const char *alias,
> +                       const char *path)
> +{
> +    if (!alias)
> +        return;

BTW: This is where I'd expect to remove the object and then...

> +
> +    qemuProcessKillPRDaemons(vm, path, false);

Just unlink the path... See some thoughts below...
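To make that a bit more concrete - if this were reworked along the lines of
a qemuDomainDelPRDefObject (more on the naming below), I'd picture something
roughly like the following. Completely untested sketch, the name, signature,
and placement are just my guesses, mirroring how the TLS object teardown
already works:

    static void
    qemuDomainDelPRDefObject(virQEMUDriverPtr driver,
                             virDomainObjPtr vm,
                             const char *alias,
                             const char *path)
    {
        qemuDomainObjPrivatePtr priv = vm->privateData;
        virErrorPtr orig_err;

        if (!alias)
            return;

        /* don't let the cleanup clobber the original failure */
        virErrorPreserveLast(&orig_err);

        /* delete the pr-manager-helper object we (may have) added */
        qemuDomainObjEnterMonitor(driver, vm);
        ignore_value(qemuMonitorDelObject(priv->mon, alias));
        ignore_value(qemuDomainObjExitMonitor(driver, vm));

        /* ... and just drop the socket; whether to also kill the helper
         * daemon when the last consumer goes away is a separate question */
        if (path && unlink(path) < 0 && errno != ENOENT)
            VIR_WARN("Unable to remove pr helper socket %s", path);

        virErrorRestore(&orig_err);
    }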
> +}
> +
> +
>  /**
>   * qemuDomainAttachDiskGeneric:
>   *
> @@ -365,12 +417,16 @@ qemuDomainAttachDiskGeneric(virQEMUDriverPtr driver,
>      char *devstr = NULL;
>      char *drivestr = NULL;
>      char *drivealias = NULL;
> +    const char *prAlias = NULL;
> +    const char *prPath = NULL;
>      bool driveAdded = false;
>      bool secobjAdded = false;
>      bool encobjAdded = false;
> +    bool prmgrAdded = false;
>      virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
>      virJSONValuePtr secobjProps = NULL;
>      virJSONValuePtr encobjProps = NULL;
> +    virJSONValuePtr prmgrProps = NULL;
>      qemuDomainStorageSourcePrivatePtr srcPriv;
>      qemuDomainSecretInfoPtr secinfo = NULL;
>      qemuDomainSecretInfoPtr encinfo = NULL;
> @@ -403,6 +459,9 @@ qemuDomainAttachDiskGeneric(virQEMUDriverPtr driver,
>                                    disk->info.alias) < 0)
>          goto error;
>
> +    if (qemuBuildPRDefInfoProps(vm, disk, &prmgrProps, &prAlias, &prPath) < 0)
> +        goto error;
> +
>      if (!(drivestr = qemuBuildDriveStr(disk, false, priv->qemuCaps)))
>          goto error;
>
> @@ -435,6 +494,15 @@ qemuDomainAttachDiskGeneric(virQEMUDriverPtr driver,
>          encobjAdded = true;
>      }
>
> +    if (prmgrProps) {
> +        rv = qemuMonitorAddObject(priv->mon, "pr-manager-helper", prAlias,
> +                                  prmgrProps);
> +        prmgrProps = NULL; /* qemuMonitorAddObject consumes */
> +        if (rv < 0)
> +            goto exit_monitor;
> +        prmgrAdded = true;
> +    }
> +

So for one of the managed modes (as noted much earlier) we could be creating
an object with a duplicated alias - does that work? I thought id= has to be
unique.

>      if (qemuMonitorAddDrive(priv->mon, drivestr) < 0)
>          goto exit_monitor;
>      driveAdded = true;
> @@ -455,6 +523,7 @@ qemuDomainAttachDiskGeneric(virQEMUDriverPtr driver,
>   cleanup:
>      virJSONValueFree(secobjProps);
>      virJSONValueFree(encobjProps);
> +    virJSONValueFree(prmgrProps);
>      qemuDomainSecretDiskDestroy(disk);
>      VIR_FREE(devstr);
>      VIR_FREE(drivestr);
> @@ -472,6 +541,8 @@ qemuDomainAttachDiskGeneric(virQEMUDriverPtr driver,
>          ignore_value(qemuMonitorDelObject(priv->mon, secinfo->s.aes.alias));
>      if (encobjAdded)
>          ignore_value(qemuMonitorDelObject(priv->mon, encinfo->s.aes.alias));
> +    if (prmgrAdded)
> +        ignore_value(qemuMonitorDelObject(priv->mon, prAlias));
>      if (qemuDomainObjExitMonitor(driver, vm) < 0)
>          ret = -2;
>      virErrorRestore(&orig_err);
> @@ -481,6 +552,7 @@ qemuDomainAttachDiskGeneric(virQEMUDriverPtr driver,
>   error:
>      qemuDomainDelDiskSrcTLSObject(driver, vm, disk->src);
>      ignore_value(qemuHotplugPrepareDiskAccess(driver, vm, disk, NULL, true));
> +    qemuDestroyPRDefObject(vm, prAlias, prPath);

oh, I see, you're mixing the way TLS object tear down occurred and how other
objects happen. If you're going to mimic TLS, then change
qemuBuildPRDefInfoProps to be something like qemuDomainAddPRDefObject which
would do the EnterMonitorAsync, Props mgmt, AddObject, and ExitMonitor. That
way qemuDestroyPRDefObject changes to qemuDomainDelPRDefObject which would
handle the failure path. Then I believe you don't have to pass around
prAlias and prPath since they'd be "obtainable".

>      goto cleanup;
>  }
>
> diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
> index 33e0ad30c..3a914b6c6 100644
> --- a/src/qemu/qemu_process.c
> +++ b/src/qemu/qemu_process.c
> @@ -2556,16 +2556,40 @@ qemuProcessResctrlCreate(virQEMUDriverPtr driver,
>  }
>
>

This subsequent hunk feels like it could have gone in with the previous
patch. Or at the very least some function that would unlink the socket path
for each of the sockets in ndisks that were created. Then that single
unlink API gets exposed.
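FWIW, the "single unlink API" I have in mind is nothing fancy - an untested
sketch with a made-up name, just reusing the same fields this patch already
walks:

    static void
    qemuProcessUnlinkPRHelperSockets(virDomainObjPtr vm)
    {
        size_t i;

        for (i = 0; i < vm->def->ndisks; i++) {
            virDomainDiskDefPtr disk = vm->def->disks[i];
            qemuDomainStorageSourcePrivatePtr srcPriv;

            /* only managed reservations have a libvirt-created socket */
            if (!virStoragePRDefIsManaged(disk->src->pr))
                continue;

            srcPriv = QEMU_DOMAIN_STORAGE_SOURCE_PRIVATE(disk->src);
            if (!srcPriv || !srcPriv->prd || !srcPriv->prd->path)
                continue;

            if (unlink(srcPriv->prd->path) < 0 && errno != ENOENT)
                VIR_WARN("Unable to remove pr helper socket %s",
                         srcPriv->prd->path);
        }
    }

Something like that could have been exposed by the previous patch, and then
both the shutdown path and the hotunplug path just call it (or unlink a
single path) without needing a @force flag.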
> -static void
> -qemuProcessKillPRDaemons(virDomainObjPtr vm)
> +void
> +qemuProcessKillPRDaemons(virDomainObjPtr vm,
> +                         const char *socketPath,
> +                         bool force)

The only time force == true is when socketPath == NULL... and that's only in
the shutdown path. When socketPath != NULL, force == false, and we're going
to unplug the socket, right?

Perhaps this would be cleaner if only @socketPath were in play. If NULL,
then walk the ndisks looking for paths that you'll unlink first before
killing the daemon.

I actually think having a separate "unlink the socket" API would be just
fine. Does it really matter if it's the "last" one to stop the helper
daemon? All I can picture is some test that continually adds and removes one
socket and that just thrashes (or eventually fails) because of the startup
processing.

IMO: Once you start the daemon because there was a lun wanting reservations,
just keep it running even if the last possible consumer was unplugged from
the guest. But yes, I do understand why you would stop it if it was the last
one, I'm just thinking it may not be necessary.

I could be off in the weeds - as this PR processing still isn't as clear to
me as it should be by this point.

John

>  {
>      qemuDomainObjPrivatePtr priv = vm->privateData;
> +    size_t nmanaged = 0;
> +    size_t i;
>
>      if (priv->prPid == (pid_t) -1)
>          return;
>
> -    virProcessKillPainfully(priv->prPid, true);
> -    priv->prPid = (pid_t) -1;
> +    for (i = 0; i < vm->def->ndisks; i++) {
> +        qemuDomainStorageSourcePrivatePtr srcPriv;
> +        virDomainDiskDefPtr disk = vm->def->disks[i];
> +
> +        if (!virStoragePRDefIsManaged(disk->src->pr))
> +            continue;
> +
> +        nmanaged++;
> +
> +        srcPriv = QEMU_DOMAIN_STORAGE_SOURCE_PRIVATE(disk->src);
> +        if (!socketPath)
> +            socketPath = srcPriv->prd->path;
> +    }
> +
> +    if (force || nmanaged <= 1) {
> +        virProcessKillPainfully(priv->prPid, true);
> +        priv->prPid = (pid_t) -1;
> +        if (socketPath &&
> +            unlink(socketPath) < 0 &&
> +            errno != ENOENT)
> +            VIR_WARN("Unable to remove pr helper socket %s", socketPath);
> +    }
>  }
>
>
> @@ -2593,7 +2617,7 @@ qemuProcessSetupOnePRDaemonHook(void *opaque)
>  }
>
>
> -static int
> +int
>  qemuProcessSetupOnePRDaemon(virDomainObjPtr vm,
>                              virDomainDiskDefPtr disk)
>  {
> @@ -2708,7 +2732,7 @@ qemuProcessSetupPRDaemons(virDomainObjPtr vm)
>      ret = 0;
>   cleanup:
>      if (ret < 0)
> -        qemuProcessKillPRDaemons(vm);
> +        qemuProcessKillPRDaemons(vm, NULL, true);
>      return ret;
>  }
>
> @@ -6816,7 +6840,7 @@ void qemuProcessStop(virQEMUDriverPtr driver,
>              VIR_FREE(vm->def->seclabels[i]->imagelabel);
>          }
>
> -    qemuProcessKillPRDaemons(vm);
> +    qemuProcessKillPRDaemons(vm, NULL, true);
>
>      qemuHostdevReAttachDomainDevices(driver, vm->def);
>
> diff --git a/src/qemu/qemu_process.h b/src/qemu/qemu_process.h
> index 274111567..ab09a7f30 100644
> --- a/src/qemu/qemu_process.h
> +++ b/src/qemu/qemu_process.h
> @@ -203,4 +203,11 @@ int qemuProcessRefreshDisks(virQEMUDriverPtr driver,
>                                  virDomainObjPtr vm,
>                                  qemuDomainAsyncJob asyncJob);
>
> +int qemuProcessSetupOnePRDaemon(virDomainObjPtr vm,
> +                                virDomainDiskDefPtr disk);
> +
> +void qemuProcessKillPRDaemons(virDomainObjPtr vm,
> +                              const char *socketPath,
> +                              bool force);
> +
>  #endif /* __QEMU_PROCESS_H__ */
> --

libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list