qemuDomainSnapshotRevertInactive has the same FIXMEs as
qemuDomainSnapshotCreateInactive, so algorithmic fixes to properly
handle partial loop iterations should be applied later to both
functions, but we're not making the situation any worse in
this patch.
* src/qemu/qemu_driver.c (qemuDomainRevertToSnapshot): Use
qemu-img rather than 'qemu -loadvm' to revert to offline snapshot.
(qemuDomainSnapshotRevertInactive): New helper.
---
src/qemu/qemu_driver.c | 65 +++++++++++++++++++++++++++++++++++++++++------
1 files changed, 56 insertions(+), 9 deletions(-)
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 16daee2..ac323b6 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -8726,6 +8726,52 @@ cleanup:
return xml;
}
+/* The domain is expected to be locked and inactive. */
+static int
+qemuDomainSnapshotRevertInactive(virDomainObjPtr vm,
+ virDomainSnapshotObjPtr snap)
+{
+    const char *qemuimgarg[] = { NULL, "snapshot", "-a", NULL, NULL, NULL };
+ int ret = -1;
+ int i;
+
+ qemuimgarg[0] = qemuFindQemuImgBinary();
+ if (qemuimgarg[0] == NULL) {
+ /* qemuFindQemuImgBinary set the error */
+ goto cleanup;
+ }
+
+ qemuimgarg[3] = snap->def->name;
+
+ for (i = 0; i < vm->def->ndisks; i++) {
+ /* FIXME: we also need to handle LVM here */
+ /* FIXME: if we fail halfway through this loop, we are in an
+ * inconsistent state. I'm not quite sure what to do about that
+ */
+ if (vm->def->disks[i]->device == VIR_DOMAIN_DISK_DEVICE_DISK) {
+ if (!vm->def->disks[i]->driverType ||
+ STRNEQ(vm->def->disks[i]->driverType, "qcow2")) {
+ qemuReportError(VIR_ERR_OPERATION_INVALID,
+ _("Disk device '%s' does not support"
+ " snapshotting"),
+ vm->def->disks[i]->info.alias);
+ goto cleanup;
+ }
+
+ qemuimgarg[4] = vm->def->disks[i]->src;
+
+ if (virRun(qemuimgarg, NULL) < 0)
+ goto cleanup;
+ }
+ }
+
+ ret = 0;
+
+cleanup:
+ VIR_FREE(qemuimgarg[0]);
+ return ret;
+}
+
static int qemuDomainRevertToSnapshot(virDomainSnapshotPtr snapshot,
unsigned int flags)
{
@@ -8822,14 +8868,9 @@ static int qemuDomainRevertToSnapshot(virDomainSnapshotPtr snapshot,
VIR_DOMAIN_EVENT_STARTED_FROM_SNAPSHOT);
}
} else {
- /* qemu is a little funny with running guests and the restoration
- * of snapshots. If the snapshot was taken online,
- * then after a "loadvm" monitor command, the VM is set running
- * again. If the snapshot was taken offline, then after a "loadvm"
- * monitor command the VM is left paused. Unpausing it leads to
- * the memory state *before* the loadvm with the disk *after* the
- * loadvm, which obviously is bound to corrupt something.
- * Therefore we destroy the domain and set it to "off" in this case.
+ /* Newer qemu -loadvm refuses to revert to the state of a snapshot
+ * created by qemu-img snapshot -c. If the domain is running, we
+ * must take it offline; then do the revert using qemu-img.
*/
if (virDomainObjIsActive(vm)) {
@@ -8839,6 +8880,12 @@ static int qemuDomainRevertToSnapshot(virDomainSnapshotPtr snapshot,
VIR_DOMAIN_EVENT_STOPPED,
VIR_DOMAIN_EVENT_STOPPED_FROM_SNAPSHOT);
if (!vm->persistent) {
+ /* XXX it should be impossible to create an offline snapshot
+ * of a transient domain. Once we fix 'undefine' to convert
+ * a defined domain back to transient, that transition should
+ * be rejected if any offline snapshots exist. For now, we
+ * just stop the transient domain and quit, without reverting
+ * any disk state. */
if (qemuDomainObjEndJob(driver, vm) > 0)
virDomainRemoveInactive(&driver->domains, vm);
vm = NULL;
@@ -8846,7 +8893,7 @@ static int qemuDomainRevertToSnapshot(virDomainSnapshotPtr snapshot,
}
}
- if (qemuDomainSnapshotSetCurrentActive(vm, driver->snapshotDir) < 0)
+ if (qemuDomainSnapshotRevertInactive(vm, snap) < 0)
goto endjob;
}
--
1.7.4.4