On Wed, Aug 24, 2011 at 09:22:20AM -0600, Eric Blake wrote:
qemuDomainSnapshotRevertInactive has the same FIXMEs as
qemuDomainSnapshotCreateInactive, so algorithmic fixes to properly
handle partial loop iterations should be applied later to both
functions, but we're not making the situation any worse in
this patch.
* src/qemu/qemu_driver.c (qemuDomainRevertToSnapshot): Use
qemu-img rather than 'qemu -loadvm' to revert to offline snapshot.
(qemuDomainSnapshotRevertInactive): New helper.
---
src/qemu/qemu_driver.c | 65 +++++++++++++++++++++++++++++++++++++++++------
1 files changed, 56 insertions(+), 9 deletions(-)
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 7950aff..1caa870 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -8738,6 +8738,52 @@ cleanup:
     return xml;
 }
+/* The domain is expected to be locked and inactive. */
+static int
+qemuDomainSnapshotRevertInactive(virDomainObjPtr vm,
+                                 virDomainSnapshotObjPtr snap)
+{
+    const char *qemuimgarg[] = { NULL, "snapshot", "-a", NULL, NULL, NULL };
+    int ret = -1;
+    int i;
+
+    qemuimgarg[0] = qemuFindQemuImgBinary();
+    if (qemuimgarg[0] == NULL) {
+        /* qemuFindQemuImgBinary set the error */
+        goto cleanup;
+    }
+
+    qemuimgarg[3] = snap->def->name;
+
+    for (i = 0; i < vm->def->ndisks; i++) {
+        /* FIXME: we also need to handle LVM here */
+        /* FIXME: if we fail halfway through this loop, we are in an
+         * inconsistent state. I'm not quite sure what to do about that
+         */
+        if (vm->def->disks[i]->device == VIR_DOMAIN_DISK_DEVICE_DISK) {
+            if (!vm->def->disks[i]->driverType ||
+                STRNEQ(vm->def->disks[i]->driverType, "qcow2")) {
+                qemuReportError(VIR_ERR_OPERATION_INVALID,
+                                _("Disk device '%s' does not support"
+                                  " snapshotting"),
+                                vm->def->disks[i]->info.alias);
+                goto cleanup;
+            }
+
+            qemuimgarg[4] = vm->def->disks[i]->src;
+
+            if (virRun(qemuimgarg, NULL) < 0)
+                goto cleanup;
+        }
+    }
+
+    ret = 0;
+
+cleanup:
+    VIR_FREE(qemuimgarg[0]);
+    return ret;
+}
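
On the second FIXME: short of copying every image up front I don't see a way to make a halfway failure atomic, but a cheap later improvement would be to at least name which disks are now out of step. Untested sketch of the idea, not something this patch needs:

            if (virRun(qemuimgarg, NULL) < 0) {
                /* hypothetical addition: the earlier disks were already
                 * reverted, so say which image broke the sequence */
                VIR_WARN("revert failed on disk '%s'; disks listed before "
                         "it were already reverted and may be inconsistent",
                         vm->def->disks[i]->src);
                goto cleanup;
            }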
I know that virRun is implemented in terms of virCommand, but should we
be aiming to convert virRun uses over to using virCommand directly?
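
Something like this (untested sketch, using the existing
virCommandNewArgList/virCommandRun helpers) is what I have in mind for the
per-disk call; at that point the argv array could probably go away entirely:

            virCommandPtr cmd;
            int rc;

            /* same qemu-img snapshot -a invocation as the virRun() call,
             * just built through the virCommand APIs directly */
            cmd = virCommandNewArgList(qemuimgarg[0], "snapshot", "-a",
                                       snap->def->name,
                                       vm->def->disks[i]->src, NULL);
            rc = virCommandRun(cmd, NULL);
            virCommandFree(cmd);
            if (rc < 0)
                goto cleanup;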
@@ -8844,14 +8890,9 @@ static int qemuDomainRevertToSnapshot(virDomainSnapshotPtr snapshot,
                                              VIR_DOMAIN_EVENT_STARTED_FROM_SNAPSHOT);
         }
     } else {
-        /* qemu is a little funny with running guests and the restoration
-         * of snapshots. If the snapshot was taken online,
-         * then after a "loadvm" monitor command, the VM is set running
-         * again. If the snapshot was taken offline, then after a "loadvm"
-         * monitor command the VM is left paused. Unpausing it leads to
-         * the memory state *before* the loadvm with the disk *after* the
-         * loadvm, which obviously is bound to corrupt something.
-         * Therefore we destroy the domain and set it to "off" in this case.
+        /* Newer qemu -loadvm refuses to revert to the state of a snapshot
+         * created by qemu-img snapshot -c. If the domain is running, we
+         * must take it offline; then do the revert using qemu-img.
          */
 
         if (virDomainObjIsActive(vm)) {
@@ -8861,6 +8902,12 @@ static int qemuDomainRevertToSnapshot(virDomainSnapshotPtr snapshot,
                                              VIR_DOMAIN_EVENT_STOPPED,
                                              VIR_DOMAIN_EVENT_STOPPED_FROM_SNAPSHOT);
             if (!vm->persistent) {
+                /* XXX it should be impossible to create an offline snapshot
+                 * of a transient domain. Once we fix 'undefine' to convert
+                 * a defined domain back to transient, that transition should
+                 * be rejected if any offline snapshots exist. For now, we
+                 * just stop the transient domain and quit, without reverting
+                 * any disk state. */
                 if (qemuDomainObjEndJob(driver, vm) > 0)
                     virDomainRemoveInactive(&driver->domains, vm);
                 vm = NULL;
@@ -8868,7 +8915,7 @@ static int qemuDomainRevertToSnapshot(virDomainSnapshotPtr snapshot,
             }
         }
 
-        if (qemuDomainSnapshotSetCurrentActive(vm, driver->snapshotDir) < 0)
+        if (qemuDomainSnapshotRevertInactive(vm, snap) < 0)
             goto endjob;
     }
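
For anyone following along, the offline revert now boils down to running the
equivalent of

    qemu-img snapshot -a <snapshot-name> <disk-image>

for each qcow2 disk, rather than booting qemu with -loadvm, mirroring the
'qemu-img snapshot -c' used when the offline snapshot was created.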
ACK
Daniel
--
|: http://berrange.com -o- http://www.flickr.com/photos/dberrange/ :|
|: http://libvirt.org -o- http://virt-manager.org :|
|: http://autobuild.org -o- http://search.cpan.org/~danberr/ :|
|: http://entangle-photo.org -o- http://live.gnome.org/gtk-vnc :|