Daniel P. Berrange wrote:
> On Tue, Apr 27, 2010 at 06:45:16PM +0200, Jim Meyering wrote:
>> I ran clang on the very latest and it spotted this problem:
>> From qemu_driver.c, around line 11100,
>>
>>     else {
>>         /* qemu is a little funny with running guests and the restoration
>>          * of snapshots. If the snapshot was taken online,
>>          * then after a "loadvm" monitor command, the VM is set running
>>          * again. If the snapshot was taken offline, then after a "loadvm"
>>          * monitor command the VM is left paused. Unpausing it leads to
>>          * the memory state *before* the loadvm with the disk *after* the
>>          * loadvm, which obviously is bound to corrupt something.
>>          * Therefore we destroy the domain and set it to "off" in this case.
>>          */
>>
>>         if (virDomainObjIsActive(vm)) {
>>             qemudShutdownVMDaemon(driver, vm);
>>             event = virDomainEventNewFromObj(vm,
>>                                              VIR_DOMAIN_EVENT_STOPPED,
>>                                              VIR_DOMAIN_EVENT_STOPPED_FROM_SNAPSHOT);
>>             if (!vm->persistent) {
>>                 if (qemuDomainObjEndJob(vm) > 0)
>>                     virDomainRemoveInactive(&driver->domains, vm);
>>                 vm = NULL;
> This needs to add 'goto endjob' or possibly 'goto cleanup'

No point in endjob, since it does nothing when vm == NULL.
Here's a tentative patch for that and another, similar problem
(haven't even compiled it or run it through clang, but have to run).
Will follow up tomorrow.
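
To illustrate the point outside of libvirt, here is a minimal, self-contained
sketch (all names here are made up; fake_vm, fake_end_job and revert_sketch
merely stand in for the real types and for qemuDomainObjEndJob): once vm has
been set to NULL, the endjob label is guarded by "vm &&" and therefore does
nothing, so jumping straight to cleanup is equivalent.

#include <stdio.h>

struct fake_vm { int persistent; };

/* Stand-in for qemuDomainObjEndJob(): pretend the last job reference
 * is always dropped, so it returns 0. */
static int fake_end_job(struct fake_vm *vm)
{
    (void)vm;
    return 0;
}

static void revert_sketch(struct fake_vm *vm)
{
    if (!vm->persistent) {
        fake_end_job(vm);   /* parallels the EndJob/RemoveInactive sequence */
        vm = NULL;          /* the object must not be dereferenced below */
        goto cleanup;       /* "goto endjob" would behave identically ... */
    }

/* endjob: */               /* ... because this label only acts when vm != NULL */
    if (vm && fake_end_job(vm) == 0)
        vm = NULL;

cleanup:
    printf("vm is %s\n", vm ? "still referenced" : "gone");
}

int main(void)
{
    struct fake_vm vm = { 0 };   /* transient domain: persistent == 0 */
    revert_sketch(&vm);          /* takes the early-exit path above */
    return 0;
}
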
From c508c0228bbefe396532d16f6a8979cc219bedee Mon Sep 17 00:00:00 2001
From: Jim Meyering <meyering@redhat.com>
Date: Tue, 27 Apr 2010 22:35:32 +0200
Subject: [PATCH] qemuDomainSnapshotCreateXML: avoid NULL dereference
* src/qemu/qemu_driver.c (qemuDomainSnapshotCreateXML): When setting
"vm" to NULL, jump over vm-dereferencing code to "cleanup".
(qemuDomainRevertToSnapshot): Likewise.
---
src/qemu/qemu_driver.c | 5 ++++-
1 files changed, 4 insertions(+), 1 deletions(-)
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 2daf038..a164560 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -10761,36 +10761,38 @@ static virDomainSnapshotPtr qemuDomainSnapshotCreateXML(virDomainPtr domain,
                                     qemuimgarg[0], snap->def->name,
                                     vm->def->disks[i]->src);
                     goto cleanup;
                 }
             }
         }
     }
     else {
         qemuDomainObjPrivatePtr priv;
         int ret;
         if (qemuDomainObjBeginJobWithDriver(driver, vm) < 0)
             goto cleanup;
         priv = vm->privateData;
         qemuDomainObjEnterMonitorWithDriver(driver, vm);
         ret = qemuMonitorCreateSnapshot(priv->mon, def->name);
         qemuDomainObjExitMonitorWithDriver(driver, vm);
-        if (qemuDomainObjEndJob(vm) == 0)
+        if (qemuDomainObjEndJob(vm) == 0) {
             vm = NULL;
+            goto cleanup;
+        }
         if (ret < 0)
             goto cleanup;
     }
     snap->def->state = vm->state;
     /* FIXME: if we fail after this point, there's not a whole lot we can
      * do; we've successfully taken the snapshot, and we are now running
      * on it, so we have to go forward the best we can
      */
     if (vm->current_snapshot) {
         def->parent = strdup(vm->current_snapshot->def->name);
         if (def->parent == NULL) {
             virReportOOMError();
             goto cleanup;
         }
@@ -11091,34 +11093,35 @@ static int qemuDomainRevertToSnapshot(virDomainSnapshotPtr snapshot,
          * then after a "loadvm" monitor command, the VM is set running
          * again. If the snapshot was taken offline, then after a "loadvm"
          * monitor command the VM is left paused. Unpausing it leads to
          * the memory state *before* the loadvm with the disk *after* the
          * loadvm, which obviously is bound to corrupt something.
          * Therefore we destroy the domain and set it to "off" in this case.
          */
         if (virDomainObjIsActive(vm)) {
             qemudShutdownVMDaemon(driver, vm);
             event = virDomainEventNewFromObj(vm,
                                              VIR_DOMAIN_EVENT_STOPPED,
                                              VIR_DOMAIN_EVENT_STOPPED_FROM_SNAPSHOT);
             if (!vm->persistent) {
                 if (qemuDomainObjEndJob(vm) > 0)
                     virDomainRemoveInactive(&driver->domains, vm);
                 vm = NULL;
+                goto cleanup;
             }
         }
         if (qemuDomainSnapshotSetActive(vm, driver->snapshotDir) < 0)
             goto endjob;
     }
     vm->state = snap->def->state;
     ret = 0;
 endjob:
     if (vm && qemuDomainObjEndJob(vm) == 0)
         vm = NULL;
 cleanup:
     if (event)
--
1.7.1.328.g9993c