[libvirt] [PATCH] qemu: Check for domain being active on successful job acquire

As this is needed. Although some functions check for domain being active before obtaining job, we need to check it after, because obtaining job unlocks domain object, during which a state of domain can be changed. --- src/qemu/qemu_driver.c | 75 ++++++++++++++++++++++++++++++++++++++++++++---- 1 files changed, 69 insertions(+), 6 deletions(-) diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c index ec01cd5..1efc66b 100644 --- a/src/qemu/qemu_driver.c +++ b/src/qemu/qemu_driver.c @@ -1948,9 +1948,18 @@ static int qemuDomainInjectNMI(virDomainPtr domain, unsigned int flags) if (qemuDomainObjBeginJobWithDriver(driver, vm, QEMU_JOB_MODIFY) < 0) goto cleanup; + + if (!virDomainObjIsActive(vm)) { + qemuReportError(VIR_ERR_OPERATION_INVALID, + "%s", _("domain is not running")); + goto endjob; + } + qemuDomainObjEnterMonitorWithDriver(driver, vm); ret = qemuMonitorInjectNMI(priv->mon); qemuDomainObjExitMonitorWithDriver(driver, vm); + +endjob: if (qemuDomainObjEndJob(driver, vm) == 0) { vm = NULL; goto cleanup; @@ -4387,7 +4396,7 @@ static char *qemuDomainGetXMLDesc(virDomainPtr dom, virDomainObjPtr vm; char *ret = NULL; unsigned long balloon; - int err; + int err = 0; /* Flags checked by virDomainDefFormat */ @@ -4413,9 +4422,17 @@ static char *qemuDomainGetXMLDesc(virDomainPtr dom, if (qemuDomainObjBeginJobWithDriver(driver, vm, QEMU_JOB_QUERY) < 0) goto cleanup; + if (!virDomainObjIsActive(vm)) { + qemuReportError(VIR_ERR_OPERATION_INVALID, + "%s", _("domain is not running")); + goto endjob; + } + qemuDomainObjEnterMonitorWithDriver(driver, vm); err = qemuMonitorGetBalloonInfo(priv->mon, &balloon); qemuDomainObjExitMonitorWithDriver(driver, vm); + +endjob: if (qemuDomainObjEndJob(driver, vm) == 0) { vm = NULL; goto cleanup; @@ -7153,6 +7170,12 @@ qemudDomainBlockStatsFlags (virDomainPtr dom, if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0) goto cleanup; + if (!virDomainObjIsActive(vm)) { + qemuReportError(VIR_ERR_OPERATION_INVALID, + "%s", _("domain 
is not running")); + goto endjob; + } + qemuDomainObjEnterMonitor(driver, vm); tmp = *nparams; ret = qemuMonitorGetBlockStatsParamsNumber(priv->mon, nparams); @@ -8672,6 +8695,12 @@ qemuDomainMigrateSetMaxSpeed(virDomainPtr dom, if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MIGRATION_OP) < 0) goto cleanup; + if (!virDomainObjIsActive(vm)) { + qemuReportError(VIR_ERR_OPERATION_INVALID, + "%s", _("domain is not running")); + goto endjob; + } + VIR_DEBUG("Setting migration bandwidth to %luMbs", bandwidth); qemuDomainObjEnterMonitor(driver, vm); ret = qemuMonitorSetMigrationSpeed(priv->mon, bandwidth); @@ -8680,6 +8709,7 @@ qemuDomainMigrateSetMaxSpeed(virDomainPtr dom, if (ret == 0) priv->migMaxBandwidth = bandwidth; +endjob: if (qemuDomainObjEndJob(driver, vm) == 0) vm = NULL; } else { @@ -8774,6 +8804,12 @@ qemuDomainSnapshotCreateActive(virConnectPtr conn, if (qemuDomainObjBeginJobWithDriver(driver, vm, QEMU_JOB_MODIFY) < 0) return -1; + if (!virDomainObjIsActive(vm)) { + qemuReportError(VIR_ERR_OPERATION_INVALID, + "%s", _("domain is not running")); + goto endjob; + } + if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) { /* savevm monitor command pauses the domain emitting an event which * confuses libvirt since it's not notified when qemu resumes the @@ -8823,6 +8859,7 @@ cleanup: _("resuming after snapshot failed")); } +endjob: if (vm && qemuDomainObjEndJob(driver, vm) == 0) { /* Only possible if a transient vm quit while our locks were down, * in which case we don't want to save snapshot metadata. 
*/ @@ -9045,6 +9082,13 @@ qemuDomainSnapshotCreateDiskActive(virConnectPtr conn, if (qemuDomainObjBeginJobWithDriver(driver, vm, QEMU_JOB_MODIFY) < 0) return -1; + if (!virDomainObjIsActive(vm)) { + qemuReportError(VIR_ERR_OPERATION_INVALID, + "%s", _("domain is not running")); + goto endjob; + } + + if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) { /* In qemu, snapshot_blkdev on a single disk will pause cpus, * but this confuses libvirt since notifications are not given @@ -9127,12 +9171,14 @@ cleanup: (persist && virDomainSaveConfig(driver->configDir, vm->newDef) < 0)) ret = -1; - if (qemuDomainObjEndJob(driver, vm) == 0) { + } + +endjob: + if (vm && (qemuDomainObjEndJob(driver, vm) == 0)) { /* Only possible if a transient vm quit while our locks were down, * in which case we don't want to save snapshot metadata. */ *vmptr = NULL; ret = -1; - } } return ret; @@ -10272,20 +10318,28 @@ static int qemuDomainMonitorCommand(virDomainPtr domain, const char *cmd, goto cleanup; } + if (qemuDomainObjBeginJobWithDriver(driver, vm, QEMU_JOB_MODIFY) < 0) + goto cleanup; + + if (!virDomainObjIsActive(vm)) { + qemuReportError(VIR_ERR_OPERATION_INVALID, "%s", + _("domain is not running")); + goto endjob; + } + priv = vm->privateData; qemuDomainObjTaint(driver, vm, VIR_DOMAIN_TAINT_CUSTOM_MONITOR, -1); hmp = !!(flags & VIR_DOMAIN_QEMU_MONITOR_COMMAND_HMP); - if (qemuDomainObjBeginJobWithDriver(driver, vm, QEMU_JOB_MODIFY) < 0) - goto cleanup; qemuDomainObjEnterMonitorWithDriver(driver, vm); ret = qemuMonitorArbitraryCommand(priv->mon, cmd, result, hmp); qemuDomainObjExitMonitorWithDriver(driver, vm); + +endjob: if (qemuDomainObjEndJob(driver, vm) == 0) { vm = NULL; - goto cleanup; } cleanup: @@ -10522,10 +10576,19 @@ qemuDomainBlockJobImpl(virDomainPtr dom, const char *path, if (qemuDomainObjBeginJobWithDriver(driver, vm, QEMU_JOB_MODIFY) < 0) goto cleanup; + + if (!virDomainObjIsActive(vm)) { + qemuReportError(VIR_ERR_OPERATION_INVALID, "%s", + _("domain is not 
running")); + goto endjob; + } + qemuDomainObjEnterMonitorWithDriver(driver, vm); priv = vm->privateData; ret = qemuMonitorBlockJob(priv->mon, device, bandwidth, info, mode); qemuDomainObjExitMonitorWithDriver(driver, vm); + +endjob: if (qemuDomainObjEndJob(driver, vm) == 0) { vm = NULL; goto cleanup; -- 1.7.3.4

On 10/12/2011 05:59 AM, Michal Privoznik wrote:
As this is needed. Although some functions check for domain being active before obtaining job, we need to check it after, because obtaining job unlocks domain object, during which a state of domain can be changed. --- src/qemu/qemu_driver.c | 75 ++++++++++++++++++++++++++++++++++++++++++++---- 1 files changed, 69 insertions(+), 6 deletions(-)
ACK. We've had patches like this in the past (commit 054d43f came to my mind). It might be nicer to someday refactor the code to move the burden into the begin-job helper, instead of repeating it on all callers, but as that is a bigger patch, and would cause more merge conflicts across backports, I'm okay if it gets delayed until someone really has the time and motivation to tackle that project. -- Eric Blake eblake@redhat.com +1-801-349-2682 Libvirt virtualization library http://libvirt.org

On 12.10.2011 23:35, Eric Blake wrote:
On 10/12/2011 05:59 AM, Michal Privoznik wrote:
As this is needed. Although some functions check for domain being active before obtaining job, we need to check it after, because obtaining job unlocks domain object, during which a state of domain can be changed. --- src/qemu/qemu_driver.c | 75 ++++++++++++++++++++++++++++++++++++++++++++---- 1 files changed, 69 insertions(+), 6 deletions(-)
ACK. We've had patches like this in the past (commit 054d43f came to my mind).
It might be nicer to someday refactor the code to move the burden into the begin-job helper, instead of repeating it on all callers, but as that is a bigger patch, and would cause more merge conflicts across backports, I'm okay if it gets delayed until someone really has the time and motivation to tackle that project.
Thanks, pushed. I basically do agree, but we need to keep in mind that some APIs do not need to check for the domain being active — in fact, some require it to be not active (e.g. create).
participants (2)
-
Eric Blake
-
Michal Privoznik