https://bugzilla.redhat.com/show_bug.cgi?id=638285 - when migrating
a guest, it was very easy to provoke a race where an application
could query block information on a VM that had just been migrated
away. Any time qemu code obtains a job lock, it must also check
that the VM was not taken down while it was waiting for the lock.
* src/qemu/qemu_driver.c (qemudDomainSetMemory)
(qemudDomainGetInfo, qemuDomainGetBlockInfo): Check that vm is
still active after obtaining the job lock, before starting any
monitor action.
---
src/qemu/qemu_driver.c | 25 ++++++++++++++++++++++---
1 files changed, 22 insertions(+), 3 deletions(-)
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index ae1d833..af5126d 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -463,7 +463,8 @@ static int ATTRIBUTE_RETURN_CHECK qemuDomainObjEndJob(virDomainObjPtr obj)
* obj must be locked before calling, qemud_driver must be unlocked
*
* To be called immediately before any QEMU monitor API call
- * Must have alrady called qemuDomainObjBeginJob().
+ * Must have already called qemuDomainObjBeginJob(), and checked
+ * that the VM is still active.
*
* To be followed with qemuDomainObjExitMonitor() once complete
*/
@@ -504,7 +505,7 @@ static void qemuDomainObjExitMonitor(virDomainObjPtr obj)
* obj must be locked before calling, qemud_driver must be locked
*
* To be called immediately before any QEMU monitor API call
- * Must have alrady called qemuDomainObjBeginJob().
+ * Must have already called qemuDomainObjBeginJob().
*
* To be followed with qemuDomainObjExitMonitorWithDriver() once complete
*/
@@ -522,7 +523,7 @@ static void qemuDomainObjEnterMonitorWithDriver(struct qemud_driver *driver, vir
/* obj must NOT be locked before calling, qemud_driver must be unlocked,
* and will be locked after returning
*
- * Should be paired with an earlier qemuDomainObjEnterMonitor() call
+ * Should be paired with an earlier qemuDomainObjEnterMonitorWithDriver() call
*/
static void qemuDomainObjExitMonitorWithDriver(struct qemud_driver *driver,
virDomainObjPtr obj)
{
@@ -4948,6 +4949,12 @@ static int qemudDomainSetMemory(virDomainPtr dom, unsigned long newmem) {
if (qemuDomainObjBeginJob(vm) < 0)
goto cleanup;
+ if (!virDomainObjIsActive(vm)) {
+ qemuReportError(VIR_ERR_OPERATION_INVALID,
+ "%s", _("domain is not running"));
+ goto endjob;
+ }
+
priv = vm->privateData;
qemuDomainObjEnterMonitor(vm);
r = qemuMonitorSetBalloon(priv->mon, newmem);
@@ -5014,11 +5021,17 @@ static int qemudDomainGetInfo(virDomainPtr dom,
} else if (!priv->jobActive) {
if (qemuDomainObjBeginJob(vm) < 0)
goto cleanup;
+ if (!virDomainObjIsActive(vm)) {
+ qemuReportError(VIR_ERR_OPERATION_INVALID,
+ "%s", _("domain is not running"));
+ goto endjob;
+ }
qemuDomainObjEnterMonitor(vm);
err = qemuMonitorGetBalloonInfo(priv->mon, &balloon);
qemuDomainObjExitMonitor(vm);
if (err < 0) {
+ endjob:
if (qemuDomainObjEndJob(vm) == 0)
vm = NULL;
goto cleanup;
@@ -10343,6 +10356,11 @@ static int qemuDomainGetBlockInfo(virDomainPtr dom,
qemuDomainObjPrivatePtr priv = vm->privateData;
if (qemuDomainObjBeginJob(vm) < 0)
goto cleanup;
+ if (!virDomainObjIsActive(vm)) {
+ qemuReportError(VIR_ERR_OPERATION_INVALID,
+ "%s", _("domain is not running"));
+ goto endjob;
+ }
qemuDomainObjEnterMonitor(vm);
ret = qemuMonitorGetBlockExtent(priv->mon,
@@ -10350,6 +10368,7 @@ static int qemuDomainGetBlockInfo(virDomainPtr dom,
&info->allocation);
qemuDomainObjExitMonitor(vm);
+ endjob:
if (qemuDomainObjEndJob(vm) == 0)
vm = NULL;
} else {
--
1.7.2.3