[PATCH 0/9] qemu: snapshot: Fix internal snapshot reversion with NVRAM image

This series fixes two things:

 - inactive snapshot handling with NVRAM image
 - use of '-loadvm' commandline option to revert snapshots

Both are individually described.

Peter Krempa (9):
  qemu: Don't store path to qemu img
  qemuDomainSnapshotForEachQcow2Raw: Remove 'driver' argument
  qemu: Move 'qemuDomainSnapshotForEachQcow2(Raw)' to qemu_snapshot.c
  qemuSnapshotForEachQcow2: Refactor
  qemuSnapshotForEachQcow2: Handle also NVRAM image for internal snapshots
  qemu: monitor: Add monitor infrastructure for 'snapshot-load' QMP command
  qemu: Add enum entries for 'snapshot-load' qemu job
  qemu: monitor: Extract vmstate presence for internal snapshots in qemuBlockGetNamedNodeData
  qemu: Avoid use of '-loadvm' commandline argument for internal snapshot reversion

 src/qemu/qemu_block.c             |   1 +
 src/qemu/qemu_blockjob.c          |   2 +
 src/qemu/qemu_blockjob.h          |   1 +
 src/qemu/qemu_command.c           |   5 +-
 src/qemu/qemu_conf.h              |   3 -
 src/qemu/qemu_domain.c            | 106 +----
 src/qemu/qemu_domain.h            |   8 -
 src/qemu/qemu_driver.c            |   3 -
 src/qemu/qemu_monitor.c           |  16 +
 src/qemu/qemu_monitor.h           |  19 +-
 src/qemu/qemu_monitor_json.c      |  49 +-
 src/qemu/qemu_monitor_json.h      |   7 +
 src/qemu/qemu_process.c           |   7 +
 src/qemu/qemu_snapshot.c          | 437 ++++++++++++++++--
 src/qemu/qemu_snapshot.h          |   5 +
 tests/qemublocktest.c             |  14 +-
 .../bitmap/snapshots-internal.out |   2 +-
 17 files changed, 511 insertions(+), 174 deletions(-)

-- 
2.47.0
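[Editorial note: both problems fixed by the series above are triggered by a domain whose UEFI NVRAM image is kept in qcow2 format, so that libvirt and qemu may disagree about which image holds the 'vmstate' section. A minimal sketch of such a configuration; the firmware and NVRAM paths are made up for illustration, not taken from the series:

  <os>
    <loader readonly='yes' type='pflash'>/usr/share/OVMF/OVMF_CODE.fd</loader>
    <nvram format='qcow2'>/var/lib/libvirt/qemu/nvram/guest_VARS.qcow2</nvram>
  </os>

With such a VM, internal snapshots have to cover the NVRAM image in addition to the disks.]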

[PATCH 1/9] qemu: Don't store path to qemu img

The 'virCommand' helpers already look up the full path to the binary in
PATH if it's not specified. This means that the qemu driver doesn't have
to look up and store the path to 'qemu-img' in the conf object and can
rely on that logic instead.

Signed-off-by: Peter Krempa <pkrempa@redhat.com>
---
 src/qemu/qemu_conf.h     |  3 ---
 src/qemu/qemu_domain.c   | 20 +-------------------
 src/qemu/qemu_domain.h   |  2 --
 src/qemu/qemu_driver.c   |  3 ---
 src/qemu/qemu_snapshot.c | 33 +++++++++------------------------
 5 files changed, 10 insertions(+), 51 deletions(-)

diff --git a/src/qemu/qemu_conf.h b/src/qemu/qemu_conf.h
index 157aba9e18..23a900193e 100644
--- a/src/qemu/qemu_conf.h
+++ b/src/qemu/qemu_conf.h
@@ -271,9 +271,6 @@ struct _virQEMUDriver {
     /* Immutable pointer, self-locking APIs */
     virDomainObjList *domains;
 
-    /* Immutable pointer */
-    char *qemuImgBinary;
-
     /* Immutable pointer, lockless APIs. Pointless abstraction */
     ebtablesContext *ebtables;
 
diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
index c798ef37fd..e8e72e5091 100644
--- a/src/qemu/qemu_domain.c
+++ b/src/qemu/qemu_domain.c
@@ -5671,17 +5671,6 @@ qemuDomainLogAppendMessage(virQEMUDriver *driver,
 }
 
 
-/* Locate an appropriate 'qemu-img' binary. */
-const char *
-qemuFindQemuImgBinary(virQEMUDriver *driver)
-{
-    if (!driver->qemuImgBinary)
-        virReportError(VIR_ERR_INTERNAL_ERROR,
-                       "%s", _("unable to find qemu-img"));
-
-    return driver->qemuImgBinary;
-}
-
 int
 qemuDomainSnapshotWriteMetadata(virDomainObj *vm,
                                 virDomainMomentObj *snapshot,
@@ -5727,18 +5716,11 @@ qemuDomainSnapshotForEachQcow2Raw(virQEMUDriver *driver,
                                   int ndisks)
 {
     virDomainSnapshotDef *snapdef = virDomainSnapshotObjGetDef(snap);
-    const char *qemuimgbin;
     size_t i;
     bool skipped = false;
 
-    qemuimgbin = qemuFindQemuImgBinary(driver);
-    if (qemuimgbin == NULL) {
-        /* qemuFindQemuImgBinary set the error */
-        return -1;
-    }
-
     for (i = 0; i < ndisks; i++) {
-        g_autoptr(virCommand) cmd = virCommandNewArgList(qemuimgbin, "snapshot",
+        g_autoptr(virCommand) cmd = virCommandNewArgList("qemu-img", "snapshot",
                                                          op, snap->def->name, NULL);
         int format = virDomainDiskGetFormat(def->disks[i]);
 
diff --git a/src/qemu/qemu_domain.h b/src/qemu/qemu_domain.h
index 1ae421e5f2..091b27823b 100644
--- a/src/qemu/qemu_domain.h
+++ b/src/qemu/qemu_domain.h
@@ -665,8 +665,6 @@ int qemuDomainLogAppendMessage(virQEMUDriver *driver,
                                const char *fmt,
                                ...) G_GNUC_PRINTF(3, 4);
 
-const char *qemuFindQemuImgBinary(virQEMUDriver *driver);
-
 int qemuDomainSnapshotWriteMetadata(virDomainObj *vm,
                                     virDomainMomentObj *snapshot,
                                     virDomainXMLOption *xmlopt,
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 72a9542c0b..90d55e53ec 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -679,8 +679,6 @@ qemuStateInitialize(bool privileged,
                             virPidFileAcquire(cfg->stateDir, "driver", getpid())) < 0)
         goto error;
 
-    qemu_driver->qemuImgBinary = virFindFileInPath("qemu-img");
-
     if (!(qemu_driver->lockManager =
           virLockManagerPluginNew(cfg->lockManagerName ?
                                   cfg->lockManagerName : "nop",
@@ -1065,7 +1063,6 @@ qemuStateCleanup(void)
     virCPUDefFree(qemu_driver->hostcpu);
     virObjectUnref(qemu_driver->caps);
     ebtablesContextFree(qemu_driver->ebtables);
-    VIR_FREE(qemu_driver->qemuImgBinary);
     virObjectUnref(qemu_driver->domains);
     virObjectUnref(qemu_driver->nbdkitCapsCache);
 
diff --git a/src/qemu/qemu_snapshot.c b/src/qemu/qemu_snapshot.c
index 795522da21..35c8d67d20 100644
--- a/src/qemu/qemu_snapshot.c
+++ b/src/qemu/qemu_snapshot.c
@@ -185,7 +185,6 @@ qemuSnapshotDomainDefUpdateDisk(virDomainDef *domdef,
 
 /**
  * qemuSnapshotCreateQcow2Files:
- * @driver: QEMU driver
  * @def: domain definition
  * @snapdef: snapshot definition
  * @created: bitmap to store which disks were created
@@ -196,20 +195,15 @@ qemuSnapshotDomainDefUpdateDisk(virDomainDef *domdef,
  * Returns 0 on success, -1 on error.
 */
 static int
-qemuSnapshotCreateQcow2Files(virQEMUDriver *driver,
-                             virDomainDef *def,
+qemuSnapshotCreateQcow2Files(virDomainDef *def,
                              virDomainSnapshotDef *snapdef,
                              virBitmap *created)
 {
     size_t i;
-    const char *qemuImgPath;
     g_auto(virBuffer) buf = VIR_BUFFER_INITIALIZER;
     virDomainSnapshotDiskDef *snapdisk = NULL;
     virDomainDiskDef *defdisk = NULL;
 
-    if (!(qemuImgPath = qemuFindQemuImgBinary(driver)))
-        return -1;
-
     for (i = 0; i < snapdef->ndisks; i++) {
         g_autoptr(virCommand) cmd = NULL;
         snapdisk = &(snapdef->disks[i]);
@@ -225,7 +219,7 @@ qemuSnapshotCreateQcow2Files(virDomainDef *def,
             return -1;
 
         /* creates cmd line args: qemu-img create -f qcow2 -o */
-        if (!(cmd = virCommandNewArgList(qemuImgPath,
+        if (!(cmd = virCommandNewArgList("qemu-img",
                                          "create",
                                          "-f",
                                          virStorageFileFormatTypeToString(snapdisk->src->format),
@@ -281,7 +275,7 @@ qemuSnapshotCreateInactiveExternal(virQEMUDriver *driver,
     /* If reuse is true, then qemuSnapshotPrepare already
      * ensured that the new files exist, and it was up to the user to
      * create them correctly. */
-    if (!reuse && qemuSnapshotCreateQcow2Files(driver, vm->def, snapdef, created) < 0)
+    if (!reuse && qemuSnapshotCreateQcow2Files(vm->def, snapdef, created) < 0)
         goto cleanup;
 
     /* update disk definitions */
@@ -2300,7 +2294,6 @@ qemuSnapshotRevertExternalActive(virDomainObj *vm,
 
 /**
  * qemuSnapshotRevertExternalInactive:
- * @vm: domain object
  * @tmpsnapdef: temporary snapshot definition
  * @domdef: offline domain definition
 *
@@ -2310,17 +2303,15 @@ qemuSnapshotRevertExternalActive(virDomainObj *vm,
 */
 static int
-qemuSnapshotRevertExternalInactive(virDomainObj *vm,
-                                   virDomainSnapshotDef *tmpsnapdef,
+qemuSnapshotRevertExternalInactive(virDomainSnapshotDef *tmpsnapdef,
                                    virDomainDef *domdef)
 {
-    virQEMUDriver *driver = QEMU_DOMAIN_PRIVATE(vm)->driver;
     g_autoptr(virBitmap) created = NULL;
     int ret = -1;
 
     created = virBitmapNew(tmpsnapdef->ndisks);
 
-    if (qemuSnapshotCreateQcow2Files(driver, domdef, tmpsnapdef, created) < 0)
+    if (qemuSnapshotCreateQcow2Files(domdef, tmpsnapdef, created) < 0)
         goto cleanup;
 
     if (qemuSnapshotDomainDefUpdateDisk(domdef, tmpsnapdef, false) < 0)
@@ -2613,7 +2604,7 @@ qemuSnapshotRevertInactive(virDomainObj *vm,
         return -1;
     }
 
-    if (qemuSnapshotRevertExternalInactive(vm, tmpsnapdef,
+    if (qemuSnapshotRevertExternalInactive(tmpsnapdef,
                                            *inactiveConfig) < 0) {
         return -1;
     }
@@ -3443,22 +3434,16 @@ qemuSnapshotSetInvalid(virDomainObj *vm,
 
 static void
-qemuSnapshotUpdateBackingStore(virDomainObj *vm,
-                               qemuSnapshotDeleteExternalData *data)
+qemuSnapshotUpdateBackingStore(qemuSnapshotDeleteExternalData *data)
 {
     GSList *cur = NULL;
-    const char *qemuImgPath;
-    virQEMUDriver *driver = QEMU_DOMAIN_PRIVATE(vm)->driver;
-
-    if (!(qemuImgPath = qemuFindQemuImgBinary(driver)))
-        return;
 
     for (cur = data->disksWithBacking; cur; cur = g_slist_next(cur)) {
         struct _qemuSnapshotDisksWithBackingStoreData *backingData = cur->data;
         g_autoptr(virCommand) cmd = NULL;
 
         /* creates cmd line args: qemu-img create -f qcow2 -o */
-        if (!(cmd = virCommandNewArgList(qemuImgPath,
+        if (!(cmd = virCommandNewArgList("qemu-img",
                                          "rebase",
                                          "-u",
                                          "-F",
@@ -3565,7 +3550,7 @@ qemuSnapshotDiscardExternal(virDomainObj *vm,
 
     qemuBlockJobSyncEnd(vm, data->job, VIR_ASYNC_JOB_SNAPSHOT);
 
-    qemuSnapshotUpdateBackingStore(vm, data);
+    qemuSnapshotUpdateBackingStore(data);
 
     if (qemuSnapshotSetInvalid(vm, data->parentSnap, data->snapDisk, false) < 0)
         goto error;
-- 
2.47.0

[PATCH 2/9] qemuDomainSnapshotForEachQcow2Raw: Remove 'driver' argument

Now that the 'driver' argument is unused except for being passed to the
recursive call, it can be dropped from the whole call tree.

Signed-off-by: Peter Krempa <pkrempa@redhat.com>
---
 src/qemu/qemu_domain.c   | 15 +++++----------
 src/qemu/qemu_domain.h   |  3 +--
 src/qemu/qemu_snapshot.c | 28 +++++++++++-----------------
 3 files changed, 17 insertions(+), 29 deletions(-)

diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
index e8e72e5091..5fdd7f9fc0 100644
--- a/src/qemu/qemu_domain.c
+++ b/src/qemu/qemu_domain.c
@@ -5708,8 +5708,7 @@ qemuDomainSnapshotWriteMetadata(virDomainObj *vm,
 /* The domain is expected to be locked and inactive. Return -1 on normal
  * failure, 1 if we skipped a disk due to try_all. */
 static int
-qemuDomainSnapshotForEachQcow2Raw(virQEMUDriver *driver,
-                                  virDomainDef *def,
+qemuDomainSnapshotForEachQcow2Raw(virDomainDef *def,
                                   virDomainMomentObj *snap,
                                   const char *op,
                                   bool try_all,
@@ -5748,8 +5747,7 @@ qemuDomainSnapshotForEachQcow2Raw(virDomainDef *def,
             } else if (STREQ(op, "-c") && i) {
                 /* We must roll back partial creation by deleting
                  * all earlier snapshots. */
-                qemuDomainSnapshotForEachQcow2Raw(driver, def, snap,
-                                                  "-d", false, i);
+                qemuDomainSnapshotForEachQcow2Raw(def, snap, "-d", false, i);
             }
             virReportError(VIR_ERR_OPERATION_INVALID,
                            _("Disk device '%1$s' does not support snapshotting"),
@@ -5768,8 +5766,7 @@ qemuDomainSnapshotForEachQcow2Raw(virDomainDef *def,
             } else if (STREQ(op, "-c") && i) {
                 /* We must roll back partial creation by deleting
                  * all earlier snapshots. */
-                qemuDomainSnapshotForEachQcow2Raw(driver, def, snap,
-                                                  "-d", false, i);
+                qemuDomainSnapshotForEachQcow2Raw(def, snap, "-d", false, i);
             }
             return -1;
         }
@@ -5781,14 +5778,12 @@ qemuDomainSnapshotForEachQcow2Raw(virDomainDef *def,
 /* The domain is expected to be locked and inactive. Return -1 on normal
  * failure, 1 if we skipped a disk due to try_all. */
 int
-qemuDomainSnapshotForEachQcow2(virQEMUDriver *driver,
-                               virDomainDef *def,
+qemuDomainSnapshotForEachQcow2(virDomainDef *def,
                                virDomainMomentObj *snap,
                                const char *op,
                                bool try_all)
 {
-    return qemuDomainSnapshotForEachQcow2Raw(driver, def, snap,
-                                             op, try_all, def->ndisks);
+    return qemuDomainSnapshotForEachQcow2Raw(def, snap, op, try_all, def->ndisks);
 }
 
 /* Hash iterator callback to discard multiple snapshots. */
diff --git a/src/qemu/qemu_domain.h b/src/qemu/qemu_domain.h
index 091b27823b..ad0f4341c0 100644
--- a/src/qemu/qemu_domain.h
+++ b/src/qemu/qemu_domain.h
@@ -670,8 +670,7 @@ int qemuDomainSnapshotWriteMetadata(virDomainObj *vm,
                                     virDomainXMLOption *xmlopt,
                                     const char *snapshotDir);
 
-int qemuDomainSnapshotForEachQcow2(virQEMUDriver *driver,
-                                   virDomainDef *def,
+int qemuDomainSnapshotForEachQcow2(virDomainDef *def,
                                    virDomainMomentObj *snap,
                                    const char *op,
                                    bool try_all);
diff --git a/src/qemu/qemu_snapshot.c b/src/qemu/qemu_snapshot.c
index 35c8d67d20..f1d5103abe 100644
--- a/src/qemu/qemu_snapshot.c
+++ b/src/qemu/qemu_snapshot.c
@@ -251,11 +251,10 @@ qemuSnapshotCreateQcow2Files(virDomainDef *def,
 
 /* The domain is expected to be locked and inactive. */
 static int
-qemuSnapshotCreateInactiveInternal(virQEMUDriver *driver,
-                                   virDomainObj *vm,
+qemuSnapshotCreateInactiveInternal(virDomainObj *vm,
                                    virDomainMomentObj *snap)
 {
-    return qemuDomainSnapshotForEachQcow2(driver, vm->def, snap, "-c", false);
+    return qemuDomainSnapshotForEachQcow2(vm->def, snap, "-c", false);
 }
 
@@ -1935,7 +1934,7 @@ qemuSnapshotCreate(virDomainObj *vm,
             if (qemuSnapshotCreateInactiveExternal(driver, vm, snap, reuse) < 0)
                 goto error;
         } else {
-            if (qemuSnapshotCreateInactiveInternal(driver, vm, snap) < 0)
+            if (qemuSnapshotCreateInactiveInternal(vm, snap) < 0)
                 goto error;
         }
     }
@@ -2533,8 +2532,7 @@ qemuSnapshotRevertActive(virDomainObj *vm,
 
 /* The domain is expected to be locked and inactive. */
 static int
-qemuSnapshotInternalRevertInactive(virQEMUDriver *driver,
-                                   virDomainObj *vm,
+qemuSnapshotInternalRevertInactive(virDomainObj *vm,
                                    virDomainMomentObj *snap)
 {
     size_t i;
@@ -2553,7 +2551,7 @@ qemuSnapshotInternalRevertInactive(virDomainObj *vm,
     }
 
     /* Try all disks, but report failure if we skipped any. */
-    if (qemuDomainSnapshotForEachQcow2(driver, def, snap, "-a", true) != 0)
+    if (qemuDomainSnapshotForEachQcow2(def, snap, "-a", true) != 0)
         return -1;
 
     return 0;
@@ -2611,7 +2609,7 @@ qemuSnapshotRevertInactive(virDomainObj *vm,
 
         qemuSnapshotRevertExternalFinish(vm, tmpsnapdef, snap);
     } else {
-        if (qemuSnapshotInternalRevertInactive(driver, vm, snap) < 0) {
+        if (qemuSnapshotInternalRevertInactive(vm, snap) < 0) {
             qemuDomainRemoveInactive(driver, vm, 0, false);
             return -1;
         }
@@ -3893,8 +3891,7 @@ qemuSnapshotDiscardActiveInternal(virDomainObj *vm,
 
 /* Discard one snapshot (or its metadata), without reparenting any children. */
 static int
-qemuSnapshotDiscardImpl(virQEMUDriver *driver,
-                        virDomainObj *vm,
+qemuSnapshotDiscardImpl(virDomainObj *vm,
                         virDomainMomentObj *snap,
                         GSList *externalData,
                         bool update_parent,
@@ -3922,7 +3919,7 @@ qemuSnapshotDiscardImpl(virDomainObj *vm,
             if (qemuSnapshotDiscardExternal(vm, snap, externalData) < 0)
                 return -1;
         } else {
-            if (qemuDomainSnapshotForEachQcow2(driver, def, snap, "-d", true) < 0)
+            if (qemuDomainSnapshotForEachQcow2(def, snap, "-d", true) < 0)
                 return -1;
         }
     } else {
@@ -3961,13 +3958,13 @@ qemuSnapshotDiscardImpl(virDomainObj *vm,
 
 static int
-qemuSnapshotDiscard(virQEMUDriver *driver,
+qemuSnapshotDiscard(virQEMUDriver *driver G_GNUC_UNUSED,
                     virDomainObj *vm,
                     virDomainMomentObj *snap,
                     bool update_parent,
                     bool metadata_only)
 {
-    return qemuSnapshotDiscardImpl(driver, vm, snap, NULL, update_parent, metadata_only);
+    return qemuSnapshotDiscardImpl(vm, snap, NULL, update_parent, metadata_only);
 }
 
@@ -3995,10 +3992,7 @@ qemuSnapshotDeleteSingle(virDomainObj *vm,
                          GSList *externalData,
                          bool metadata_only)
 {
-    qemuDomainObjPrivate *priv = vm->privateData;
-    virQEMUDriver *driver = priv->driver;
-
-    return qemuSnapshotDiscardImpl(driver, vm, snap, externalData, true, metadata_only);
+    return qemuSnapshotDiscardImpl(vm, snap, externalData, true, metadata_only);
 }
-- 
2.47.0

[PATCH 3/9] qemu: Move 'qemuDomainSnapshotForEachQcow2(Raw)' to qemu_snapshot.c

The functions are exclusively used in the snapshot module. Move and
rename them:

 qemuDomainSnapshotForEachQcow2Raw -> qemuSnapshotForEachQcow2Internal
 qemuDomainSnapshotForEachQcow2 -> qemuSnapshotForEachQcow2

Signed-off-by: Peter Krempa <pkrempa@redhat.com>
---
 src/qemu/qemu_domain.c   | 81 ------------------------------
 src/qemu/qemu_domain.h   |  5 ---
 src/qemu/qemu_snapshot.c | 89 ++++++++++++++++++++++++++++++++--
 3 files changed, 86 insertions(+), 89 deletions(-)

diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
index 5fdd7f9fc0..4c0bfcd172 100644
--- a/src/qemu/qemu_domain.c
+++ b/src/qemu/qemu_domain.c
@@ -5705,87 +5705,6 @@ qemuDomainSnapshotWriteMetadata(virDomainObj *vm,
 }
 
 
-/* The domain is expected to be locked and inactive. Return -1 on normal
- * failure, 1 if we skipped a disk due to try_all. */
-static int
-qemuDomainSnapshotForEachQcow2Raw(virDomainDef *def,
-                                  virDomainMomentObj *snap,
-                                  const char *op,
-                                  bool try_all,
-                                  int ndisks)
-{
-    virDomainSnapshotDef *snapdef = virDomainSnapshotObjGetDef(snap);
-    size_t i;
-    bool skipped = false;
-
-    for (i = 0; i < ndisks; i++) {
-        g_autoptr(virCommand) cmd = virCommandNewArgList("qemu-img", "snapshot",
-                                                         op, snap->def->name, NULL);
-        int format = virDomainDiskGetFormat(def->disks[i]);
-
-        /* FIXME: we also need to handle LVM here */
-        if (def->disks[i]->device != VIR_DOMAIN_DISK_DEVICE_DISK ||
-            snapdef->disks[i].snapshot != VIR_DOMAIN_SNAPSHOT_LOCATION_INTERNAL)
-            continue;
-
-        if (!virStorageSourceIsLocalStorage(def->disks[i]->src)) {
-            virReportError(VIR_ERR_OPERATION_INVALID,
-                           _("can't manipulate inactive snapshots of disk '%1$s'"),
-                           def->disks[i]->dst);
-            return -1;
-        }
-
-        if (format > 0 && format != VIR_STORAGE_FILE_QCOW2) {
-            if (try_all) {
-                /* Continue on even in the face of error, since other
-                 * disks in this VM may have the same snapshot name.
-                 */
-                VIR_WARN("skipping snapshot action on %s",
-                         def->disks[i]->dst);
-                skipped = true;
-                continue;
-            } else if (STREQ(op, "-c") && i) {
-                /* We must roll back partial creation by deleting
-                 * all earlier snapshots. */
-                qemuDomainSnapshotForEachQcow2Raw(def, snap, "-d", false, i);
-            }
-            virReportError(VIR_ERR_OPERATION_INVALID,
-                           _("Disk device '%1$s' does not support snapshotting"),
-                           def->disks[i]->dst);
-            return -1;
-        }
-
-        virCommandAddArg(cmd, virDomainDiskGetSource(def->disks[i]));
-
-        if (virCommandRun(cmd, NULL) < 0) {
-            if (try_all) {
-                VIR_WARN("skipping snapshot action on %s",
-                         def->disks[i]->dst);
-                skipped = true;
-                continue;
-            } else if (STREQ(op, "-c") && i) {
-                /* We must roll back partial creation by deleting
-                 * all earlier snapshots. */
-                qemuDomainSnapshotForEachQcow2Raw(def, snap, "-d", false, i);
-            }
-            return -1;
-        }
-    }
-
-    return skipped ? 1 : 0;
-}
-
-/* The domain is expected to be locked and inactive. Return -1 on normal
- * failure, 1 if we skipped a disk due to try_all. */
-int
-qemuDomainSnapshotForEachQcow2(virDomainDef *def,
-                               virDomainMomentObj *snap,
-                               const char *op,
-                               bool try_all)
-{
-    return qemuDomainSnapshotForEachQcow2Raw(def, snap, op, try_all, def->ndisks);
-}
-
 /* Hash iterator callback to discard multiple snapshots. */
 int qemuDomainMomentDiscardAll(void *payload,
                                const char *name G_GNUC_UNUSED,
diff --git a/src/qemu/qemu_domain.h b/src/qemu/qemu_domain.h
index ad0f4341c0..e810f79599 100644
--- a/src/qemu/qemu_domain.h
+++ b/src/qemu/qemu_domain.h
@@ -670,11 +670,6 @@ int qemuDomainSnapshotWriteMetadata(virDomainObj *vm,
                                     virDomainXMLOption *xmlopt,
                                     const char *snapshotDir);
 
-int qemuDomainSnapshotForEachQcow2(virDomainDef *def,
-                                   virDomainMomentObj *snap,
-                                   const char *op,
-                                   bool try_all);
-
 typedef struct _virQEMUMomentRemove virQEMUMomentRemove;
 struct _virQEMUMomentRemove {
     virQEMUDriver *driver;
diff --git a/src/qemu/qemu_snapshot.c b/src/qemu/qemu_snapshot.c
index f1d5103abe..222afe62e1 100644
--- a/src/qemu/qemu_snapshot.c
+++ b/src/qemu/qemu_snapshot.c
@@ -249,12 +249,95 @@ qemuSnapshotCreateQcow2Files(virDomainDef *def,
 }
 
 
+/* The domain is expected to be locked and inactive. Return -1 on normal
+ * failure, 1 if we skipped a disk due to try_all. */
+static int
+qemuSnapshotForEachQcow2Internal(virDomainDef *def,
+                                 virDomainMomentObj *snap,
+                                 const char *op,
+                                 bool try_all,
+                                 int ndisks)
+{
+    virDomainSnapshotDef *snapdef = virDomainSnapshotObjGetDef(snap);
+    size_t i;
+    bool skipped = false;
+
+    for (i = 0; i < ndisks; i++) {
+        g_autoptr(virCommand) cmd = virCommandNewArgList("qemu-img", "snapshot",
+                                                         op, snap->def->name, NULL);
+        int format = virDomainDiskGetFormat(def->disks[i]);
+
+        /* FIXME: we also need to handle LVM here */
+        if (def->disks[i]->device != VIR_DOMAIN_DISK_DEVICE_DISK ||
+            snapdef->disks[i].snapshot != VIR_DOMAIN_SNAPSHOT_LOCATION_INTERNAL)
+            continue;
+
+        if (!virStorageSourceIsLocalStorage(def->disks[i]->src)) {
+            virReportError(VIR_ERR_OPERATION_INVALID,
+                           _("can't manipulate inactive snapshots of disk '%1$s'"),
+                           def->disks[i]->dst);
+            return -1;
+        }
+
+        if (format > 0 && format != VIR_STORAGE_FILE_QCOW2) {
+            if (try_all) {
+                /* Continue on even in the face of error, since other
+                 * disks in this VM may have the same snapshot name.
+                 */
+                VIR_WARN("skipping snapshot action on %s",
+                         def->disks[i]->dst);
+                skipped = true;
+                continue;
+            } else if (STREQ(op, "-c") && i) {
+                /* We must roll back partial creation by deleting
+                 * all earlier snapshots. */
+                qemuSnapshotForEachQcow2Internal(def, snap, "-d", false, i);
+            }
+            virReportError(VIR_ERR_OPERATION_INVALID,
+                           _("Disk device '%1$s' does not support snapshotting"),
+                           def->disks[i]->dst);
+            return -1;
+        }
+
+        virCommandAddArg(cmd, virDomainDiskGetSource(def->disks[i]));
+
+        if (virCommandRun(cmd, NULL) < 0) {
+            if (try_all) {
+                VIR_WARN("skipping snapshot action on %s",
+                         def->disks[i]->dst);
+                skipped = true;
+                continue;
+            } else if (STREQ(op, "-c") && i) {
+                /* We must roll back partial creation by deleting
+                 * all earlier snapshots. */
+                qemuSnapshotForEachQcow2Internal(def, snap, "-d", false, i);
+            }
+            return -1;
+        }
+    }
+
+    return skipped ? 1 : 0;
+}
+
+
+/* The domain is expected to be locked and inactive. Return -1 on normal
+ * failure, 1 if we skipped a disk due to try_all. */
+static int
+qemuSnapshotForEachQcow2(virDomainDef *def,
+                         virDomainMomentObj *snap,
+                         const char *op,
+                         bool try_all)
+{
+    return qemuSnapshotForEachQcow2Internal(def, snap, op, try_all, def->ndisks);
+}
+
+
 /* The domain is expected to be locked and inactive. */
 static int
 qemuSnapshotCreateInactiveInternal(virDomainObj *vm,
                                    virDomainMomentObj *snap)
 {
-    return qemuDomainSnapshotForEachQcow2(vm->def, snap, "-c", false);
+    return qemuSnapshotForEachQcow2(vm->def, snap, "-c", false);
 }
 
@@ -2551,7 +2634,7 @@ qemuSnapshotInternalRevertInactive(virDomainObj *vm,
     }
 
     /* Try all disks, but report failure if we skipped any. */
-    if (qemuDomainSnapshotForEachQcow2(def, snap, "-a", true) != 0)
+    if (qemuSnapshotForEachQcow2(def, snap, "-a", true) != 0)
         return -1;
 
     return 0;
@@ -3919,7 +4002,7 @@ qemuSnapshotDiscardImpl(virDomainObj *vm,
         if (qemuSnapshotDiscardExternal(vm, snap, externalData) < 0)
             return -1;
     } else {
-        if (qemuDomainSnapshotForEachQcow2(def, snap, "-d", true) < 0)
+        if (qemuSnapshotForEachQcow2(def, snap, "-d", true) < 0)
             return -1;
     }
 } else {
-- 
2.47.0

[PATCH 4/9] qemuSnapshotForEachQcow2: Refactor

Refactor the function to avoid the recursive call used for rollback and
to simplify the calling parameters. To achieve that, most of the fatal
checks are extracted into a dedicated loop that runs before modifying
any disk state, removing the need for rollback in those cases
altogether. Since rollback is still necessary when creation of the
snapshot fails half-way through, it is extracted to handle only that
scenario.

Additionally, callers passed the old 'try_all' argument as true in all
non-creation (non-"-c") modes. This means we can infer it from the
operation instead of passing it as an extra argument.

This refactor will also make it much simpler to implement handling of
the NVRAM pflash backing file (in case it's qcow2) for internal
snapshots.

Signed-off-by: Peter Krempa <pkrempa@redhat.com>
---
 src/qemu/qemu_snapshot.c | 141 ++++++++++++++++++++++++---------------
 1 file changed, 88 insertions(+), 53 deletions(-)

diff --git a/src/qemu/qemu_snapshot.c b/src/qemu/qemu_snapshot.c
index 222afe62e1..f560cf270c 100644
--- a/src/qemu/qemu_snapshot.c
+++ b/src/qemu/qemu_snapshot.c
@@ -249,86 +249,121 @@ qemuSnapshotCreateQcow2Files(virDomainDef *def,
 }
 
 
-/* The domain is expected to be locked and inactive. Return -1 on normal
- * failure, 1 if we skipped a disk due to try_all. */
 static int
-qemuSnapshotForEachQcow2Internal(virDomainDef *def,
-                                 virDomainMomentObj *snap,
-                                 const char *op,
-                                 bool try_all,
-                                 int ndisks)
+qemuSnapshotForEachQcow2One(virStorageSource *src,
+                            const char *op,
+                            const char *snapname)
+{
+    g_autoptr(virCommand) cmd = NULL;
+
+    cmd = virCommandNewArgList("qemu-img", "snapshot",
+                               op, snapname, src->path, NULL);
+
+    if (virCommandRun(cmd, NULL) < 0)
+        return -1;
+
+    return 0;
+}
+
+
+/**
+ * qemuSnapshotForEachQcow2:
+ *
+ * @def: domain definition
+ * @snap: snapshot object
+ * @op: 'qemu-img snapshot' operation flag, one of "-c", "-d", "-a"
+ *
+ * Applies the selected 'qemu-img snapshot' operation @op on all relevant QCOW2
+ * images of @def. When @op is "-c" (create) any failure is fatal and
+ * rolled back. Otherwise the selected operation @op is applied on all images
+ * regardless of failure.
+ *
+ * Returns: -1 on error; 0 on complete success; 1 on partial success in
+ * permissive modes.
+ */
+static int
+qemuSnapshotForEachQcow2(virDomainDef *def,
+                         virDomainMomentObj *snap,
+                         const char *op)
 {
     virDomainSnapshotDef *snapdef = virDomainSnapshotObjGetDef(snap);
     size_t i;
     bool skipped = false;
+    bool create = STREQ(op, "-c");
+    size_t nrollback = -1;
+    virErrorPtr orig_err;
 
-    for (i = 0; i < ndisks; i++) {
-        g_autoptr(virCommand) cmd = virCommandNewArgList("qemu-img", "snapshot",
-                                                         op, snap->def->name, NULL);
-        int format = virDomainDiskGetFormat(def->disks[i]);
+    /* pre-checks */
+    for (i = 0; i < def->ndisks; i++) {
+        virDomainDiskDef *disk = def->disks[i];
 
-        /* FIXME: we also need to handle LVM here */
         if (def->disks[i]->device != VIR_DOMAIN_DISK_DEVICE_DISK ||
            snapdef->disks[i].snapshot != VIR_DOMAIN_SNAPSHOT_LOCATION_INTERNAL)
             continue;
 
-        if (!virStorageSourceIsLocalStorage(def->disks[i]->src)) {
+        if (!virStorageSourceIsLocalStorage(disk->src)) {
             virReportError(VIR_ERR_OPERATION_INVALID,
                            _("can't manipulate inactive snapshots of disk '%1$s'"),
-                           def->disks[i]->dst);
+                           disk->dst);
             return -1;
         }
 
-        if (format > 0 && format != VIR_STORAGE_FILE_QCOW2) {
-            if (try_all) {
-                /* Continue on even in the face of error, since other
-                 * disks in this VM may have the same snapshot name.
-                 */
-                VIR_WARN("skipping snapshot action on %s",
-                         def->disks[i]->dst);
-                skipped = true;
-                continue;
-            } else if (STREQ(op, "-c") && i) {
-                /* We must roll back partial creation by deleting
-                 * all earlier snapshots. */
-                qemuSnapshotForEachQcow2Internal(def, snap, "-d", false, i);
-            }
+        if (create &&
+            disk->src->format > VIR_STORAGE_FILE_NONE &&
+            disk->src->format != VIR_STORAGE_FILE_QCOW2) {
             virReportError(VIR_ERR_OPERATION_INVALID,
                            _("Disk device '%1$s' does not support snapshotting"),
-                           def->disks[i]->dst);
+                           disk->dst);
             return -1;
         }
+    }
+
+    for (i = 0; i < def->ndisks; i++) {
+        virDomainDiskDef *disk = def->disks[i];
+
+        if (def->disks[i]->device != VIR_DOMAIN_DISK_DEVICE_DISK ||
+            snapdef->disks[i].snapshot != VIR_DOMAIN_SNAPSHOT_LOCATION_INTERNAL)
+            continue;
 
-        virCommandAddArg(cmd, virDomainDiskGetSource(def->disks[i]));
+        if (disk->src->format > VIR_STORAGE_FILE_NONE &&
+            disk->src->format != VIR_STORAGE_FILE_QCOW2) {
+            VIR_WARN("skipping 'qemu-img snapshot %s' action on non-qcow2 disk '%s'",
+                     op, disk->dst);
+            skipped = true;
+            continue;
+        }
 
-        if (virCommandRun(cmd, NULL) < 0) {
-            if (try_all) {
-                VIR_WARN("skipping snapshot action on %s",
-                         def->disks[i]->dst);
+        if (qemuSnapshotForEachQcow2One(disk->src, op, snap->def->name) < 0) {
+            if (create) {
+                nrollback = i;
+                virErrorPreserveLast(&orig_err);
+                goto rollback;
+            } else {
+                VIR_WARN("failed 'qemu-img snapshot %s' action on '%s'",
+                         op, disk->dst);
                 skipped = true;
-                continue;
-            } else if (STREQ(op, "-c") && i) {
-                /* We must roll back partial creation by deleting
-                 * all earlier snapshots. */
-                qemuSnapshotForEachQcow2Internal(def, snap, "-d", false, i);
+                virResetLastError();
             }
-            return -1;
         }
     }
 
     return skipped ? 1 : 0;
-}
 
+ rollback:
+    for (i = 0; i < nrollback; i++) {
+        virDomainDiskDef *disk = def->disks[i];
 
-/* The domain is expected to be locked and inactive. Return -1 on normal
- * failure, 1 if we skipped a disk due to try_all. */
-static int
-qemuSnapshotForEachQcow2(virDomainDef *def,
-                         virDomainMomentObj *snap,
-                         const char *op,
-                         bool try_all)
-{
-    return qemuSnapshotForEachQcow2Internal(def, snap, op, try_all, def->ndisks);
+        if (def->disks[i]->device != VIR_DOMAIN_DISK_DEVICE_DISK ||
+            snapdef->disks[i].snapshot != VIR_DOMAIN_SNAPSHOT_LOCATION_INTERNAL ||
+            (disk->src->format > VIR_STORAGE_FILE_NONE &&
+             disk->src->format != VIR_STORAGE_FILE_QCOW2))
+            continue;
+
+        ignore_value(qemuSnapshotForEachQcow2One(disk->src, "-d", snap->def->name));
+    }
+
+    virErrorRestore(&orig_err);
+    return -1;
 }
 
@@ -337,7 +372,7 @@ static int
 qemuSnapshotCreateInactiveInternal(virDomainObj *vm,
                                    virDomainMomentObj *snap)
 {
-    return qemuSnapshotForEachQcow2(vm->def, snap, "-c", false);
+    return qemuSnapshotForEachQcow2(vm->def, snap, "-c");
 }
 
@@ -2634,7 +2669,7 @@ qemuSnapshotInternalRevertInactive(virDomainObj *vm,
     }
 
     /* Try all disks, but report failure if we skipped any. */
-    if (qemuSnapshotForEachQcow2(def, snap, "-a", true) != 0)
+    if (qemuSnapshotForEachQcow2(def, snap, "-a") != 0)
         return -1;
 
     return 0;
@@ -4002,7 +4037,7 @@ qemuSnapshotDiscardImpl(virDomainObj *vm,
         if (qemuSnapshotDiscardExternal(vm, snap, externalData) < 0)
             return -1;
     } else {
-        if (qemuSnapshotForEachQcow2(def, snap, "-d", true) < 0)
+        if (qemuSnapshotForEachQcow2(def, snap, "-d") < 0)
             return -1;
    }
 } else {
-- 
2.47.0
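[Editorial note: the rollback behaviour of the refactored function above boils down to plain qemu-img invocations. A hypothetical sequence for a two-disk VM where the second disk fails (paths made up for illustration):

  qemu-img snapshot -c snap1 /var/lib/libvirt/images/vda.qcow2   # succeeds
  qemu-img snapshot -c snap1 /var/lib/libvirt/images/vdb.qcow2   # fails
  qemu-img snapshot -d snap1 /var/lib/libvirt/images/vda.qcow2   # rollback of disk 1

For the permissive modes ("-a", "-d") no rollback happens; failures are logged and reported as partial success via return value 1.]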

[PATCH 5/9] qemuSnapshotForEachQcow2: Handle also NVRAM image for internal snapshots

The live VM snapshot code already handles the NVRAM image when it's in
use, so we should also handle it when modifying/creating the snapshots
via qemu-img while inactive. Add the handling to qemuSnapshotForEachQcow2,
which is used for all inactive operations.

Signed-off-by: Peter Krempa <pkrempa@redhat.com>
---
 src/qemu/qemu_snapshot.c | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)

diff --git a/src/qemu/qemu_snapshot.c b/src/qemu/qemu_snapshot.c
index f560cf270c..4a17935627 100644
--- a/src/qemu/qemu_snapshot.c
+++ b/src/qemu/qemu_snapshot.c
@@ -347,6 +347,26 @@ qemuSnapshotForEachQcow2(virDomainDef *def,
         }
     }
 
+    if (def->os.loader && def->os.loader->nvram) {
+        virStorageSource *nvram = def->os.loader->nvram;
+
+        if (virStorageSourceIsLocalStorage(nvram) &&
+            nvram->format == VIR_STORAGE_FILE_QCOW2) {
+            if (qemuSnapshotForEachQcow2One(nvram, op, snap->def->name) < 0) {
+                if (create) {
+                    nrollback = def->ndisks;
+                    virErrorPreserveLast(&orig_err);
+                    goto rollback;
+                } else {
+                    VIR_WARN("failed 'qemu-img snapshot %s' action on NVRAM image",
+                             op);
+                    skipped = true;
+                    virResetLastError();
+                }
+            }
+        }
+    }
+
     return skipped ? 1 : 0;
 
  rollback:
-- 
2.47.0
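[Editorial note: in effect, an inactive internal snapshot operation now additionally issues the same qemu-img command against a local qcow2 NVRAM image, e.g. for creation (path is hypothetical):

  qemu-img snapshot -c snap1 /var/lib/libvirt/qemu/nvram/guest_VARS.qcow2

Non-qcow2 or non-local NVRAM images are skipped, matching the handling of disks.]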

[PATCH 6/9] qemu: monitor: Add monitor infrastructure for 'snapshot-load' QMP command

Libvirt currently loads snapshots via the '-loadvm' commandline option,
which uses the same logic as the 'loadvm' text monitor command to pick
the disk image with the 'vmstate' section. Since libvirt now implements
its own logic to pick the 'vmstate' device, it can happen that libvirt
picks a different device than qemu, and qemu then fails to load the
snapshot. This currently happens on VMs with UEFI firmware whose NVRAM
image is in qcow2 format.

To fix this, libvirt needs to use the 'snapshot-load' QMP command
instead of relying on '-loadvm'. Implement the monitor bits for
'snapshot-load'.

Signed-off-by: Peter Krempa <pkrempa@redhat.com>
---
 src/qemu/qemu_monitor.c      | 16 ++++++++++++++++
 src/qemu/qemu_monitor.h      |  7 +++++++
 src/qemu/qemu_monitor_json.c | 31 +++++++++++++++++++++++++++++++
 src/qemu/qemu_monitor_json.h |  7 +++++++
 4 files changed, 61 insertions(+)

diff --git a/src/qemu/qemu_monitor.c b/src/qemu/qemu_monitor.c
index fd888e2468..73f37d26eb 100644
--- a/src/qemu/qemu_monitor.c
+++ b/src/qemu/qemu_monitor.c
@@ -2780,6 +2780,22 @@ qemuMonitorSnapshotSave(qemuMonitor *mon,
 }
 
 
+int
+qemuMonitorSnapshotLoad(qemuMonitor *mon,
+                        const char *jobname,
+                        const char *snapshotname,
+                        const char *vmstate_disk,
+                        const char **disks)
+{
+    VIR_DEBUG("jobname='%s', snapshotname='%s', vmstate_disk='%s'",
+              jobname, snapshotname, vmstate_disk);
+
+    QEMU_CHECK_MONITOR(mon);
+
+    return qemuMonitorJSONSnapshotLoad(mon, jobname, snapshotname, vmstate_disk, disks);
+}
+
+
 int
 qemuMonitorSnapshotDelete(qemuMonitor *mon,
                           const char *jobname,
diff --git a/src/qemu/qemu_monitor.h b/src/qemu/qemu_monitor.h
index 035c9a7e3c..aded771315 100644
--- a/src/qemu/qemu_monitor.h
+++ b/src/qemu/qemu_monitor.h
@@ -1626,6 +1626,13 @@ qemuMonitorSnapshotSave(qemuMonitor *mon,
                         const char *vmstate_disk,
                         const char **disks);
 
+int
+qemuMonitorSnapshotLoad(qemuMonitor *mon,
+                        const char *jobname,
+                        const char *snapshotname,
+                        const char *vmstate_disk,
+                        const char **disks);
+
 int
 qemuMonitorSnapshotDelete(qemuMonitor *mon,
                           const char *jobname,
diff --git a/src/qemu/qemu_monitor_json.c b/src/qemu/qemu_monitor_json.c
index c594b33106..b3924461a9 100644
--- a/src/qemu/qemu_monitor_json.c
+++ b/src/qemu/qemu_monitor_json.c
@@ -8754,6 +8754,37 @@ qemuMonitorJSONSnapshotSave(qemuMonitor *mon,
 }
 
 
+int
+qemuMonitorJSONSnapshotLoad(qemuMonitor *mon,
+                            const char *jobname,
+                            const char *snapshotname,
+                            const char *vmstate_disk,
+                            const char **disks)
+{
+    g_autoptr(virJSONValue) cmd = NULL;
+    g_autoptr(virJSONValue) reply = NULL;
+    g_autoptr(virJSONValue) devices = virJSONValueNewArray();
+
+    for (; *disks; disks++) {
+        if (virJSONValueArrayAppendString(devices, *disks) < 0)
+            return -1;
+    }
+
+    if (!(cmd = qemuMonitorJSONMakeCommand("snapshot-load",
+                                           "s:job-id", jobname,
+                                           "s:tag", snapshotname,
+                                           "s:vmstate", vmstate_disk,
+                                           "a:devices", &devices,
+                                           NULL)))
+        return -1;
+
+    if (qemuMonitorJSONCommand(mon, cmd, &reply) < 0)
+        return -1;
+
+    return qemuMonitorJSONCheckError(cmd, reply);
+}
+
+
 int
 qemuMonitorJSONSnapshotDelete(qemuMonitor *mon,
                               const char *jobname,
diff --git a/src/qemu/qemu_monitor_json.h b/src/qemu/qemu_monitor_json.h
index ab3c2cb7c8..0214e9e9ff 100644
--- a/src/qemu/qemu_monitor_json.h
+++ b/src/qemu/qemu_monitor_json.h
@@ -804,6 +804,13 @@ qemuMonitorJSONSnapshotSave(qemuMonitor *mon,
                             const char *vmstate_disk,
                             const char **disks);
 
+int
+qemuMonitorJSONSnapshotLoad(qemuMonitor *mon,
+                            const char *jobname,
+                            const char *snapshotname,
+                            const char *vmstate_disk,
+                            const char **disks);
+
 int
 qemuMonitorJSONSnapshotDelete(qemuMonitor *mon,
                               const char *jobname,
-- 
2.47.0
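[Editorial note: the QMP command produced by the helper added above looks roughly like this on the wire; the job name and node names are illustrative, real node names are auto-generated by libvirt:

  { "execute": "snapshot-load",
    "arguments": { "job-id": "internal-snapshot-load-snap1",
                   "tag": "snap1",
                   "vmstate": "libvirt-1-format",
                   "devices": [ "libvirt-1-format", "libvirt-2-format" ] } }

'snapshot-load' runs as a background job, so the reply only confirms that the job started; completion or failure is reported asynchronously through the job state, which is what the job infrastructure added in the following patches is used for.]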

[PATCH 7/9] qemu: Add enum entries for 'snapshot-load' qemu job

The internal snapshot code will use the 'snapshot-load' command so we
need to add the corresponding job type.

Signed-off-by: Peter Krempa <pkrempa@redhat.com>
---
 src/qemu/qemu_block.c    | 1 +
 src/qemu/qemu_blockjob.c | 2 ++
 src/qemu/qemu_blockjob.h | 1 +
 src/qemu/qemu_domain.c   | 2 ++
 4 files changed, 6 insertions(+)

diff --git a/src/qemu/qemu_block.c b/src/qemu/qemu_block.c
index 692b4d350e..3c1305ec84 100644
--- a/src/qemu/qemu_block.c
+++ b/src/qemu/qemu_block.c
@@ -3777,6 +3777,7 @@ qemuBlockPivot(virDomainObj *vm,
     case QEMU_BLOCKJOB_TYPE_CREATE:
     case QEMU_BLOCKJOB_TYPE_SNAPSHOT_SAVE:
     case QEMU_BLOCKJOB_TYPE_SNAPSHOT_DELETE:
+    case QEMU_BLOCKJOB_TYPE_SNAPSHOT_LOAD:
     case QEMU_BLOCKJOB_TYPE_BROKEN:
         virReportError(VIR_ERR_OPERATION_INVALID,
                        _("job type '%1$s' does not support pivot"),
diff --git a/src/qemu/qemu_blockjob.c b/src/qemu/qemu_blockjob.c
index 6e53603fba..c35321790e 100644
--- a/src/qemu/qemu_blockjob.c
+++ b/src/qemu/qemu_blockjob.c
@@ -70,6 +70,7 @@ VIR_ENUM_IMPL(qemuBlockjob,
               "create",
               "snapshot-save",
               "snapshot-delete",
+              "snapshot-load",
               "broken");
 
 static virClass *qemuBlockJobDataClass;
@@ -1459,6 +1460,7 @@ qemuBlockJobEventProcessConcludedTransition(qemuBlockJobData *job,
 
     case QEMU_BLOCKJOB_TYPE_SNAPSHOT_SAVE:
     case QEMU_BLOCKJOB_TYPE_SNAPSHOT_DELETE:
+    case QEMU_BLOCKJOB_TYPE_SNAPSHOT_LOAD:
         /* The internal snapshot jobs don't need any extra handling */
         break;
 
diff --git a/src/qemu/qemu_blockjob.h b/src/qemu/qemu_blockjob.h
index 6620e08c47..572a838676 100644
--- a/src/qemu/qemu_blockjob.h
+++ b/src/qemu/qemu_blockjob.h
@@ -67,6 +67,7 @@ typedef enum {
     QEMU_BLOCKJOB_TYPE_CREATE,
     QEMU_BLOCKJOB_TYPE_SNAPSHOT_SAVE,
     QEMU_BLOCKJOB_TYPE_SNAPSHOT_DELETE,
+    QEMU_BLOCKJOB_TYPE_SNAPSHOT_LOAD,
     QEMU_BLOCKJOB_TYPE_BROKEN,
 
     QEMU_BLOCKJOB_TYPE_LAST
 } qemuBlockJobType;
diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
index 4c0bfcd172..dfbf3dd9f0 100644
--- a/src/qemu/qemu_domain.c
+++ b/src/qemu/qemu_domain.c
@@ -2478,6 +2478,7 @@ qemuDomainObjPrivateXMLFormatBlockjobIterator(void *payload,
 
         case QEMU_BLOCKJOB_TYPE_SNAPSHOT_SAVE:
         case QEMU_BLOCKJOB_TYPE_SNAPSHOT_DELETE:
+        case QEMU_BLOCKJOB_TYPE_SNAPSHOT_LOAD:
             /* No private data for internal snapshot jobs */
             break;
 
@@ -3035,6 +3036,7 @@ qemuDomainObjPrivateXMLParseBlockjobDataSpecific(qemuBlockJobData *job,
 
     case QEMU_BLOCKJOB_TYPE_SNAPSHOT_SAVE:
     case QEMU_BLOCKJOB_TYPE_SNAPSHOT_DELETE:
+    case QEMU_BLOCKJOB_TYPE_SNAPSHOT_LOAD:
         /* No extra data for internal snapshot jobs. */
         break;
 
-- 
2.47.0

[PATCH 8/9] qemu: monitor: Extract vmstate presence for internal snapshots in qemuBlockGetNamedNodeData

Refactor the parts of qemuBlockGetNamedNodeData which fetch the names
of internal snapshots present in the on-disk state of QCOW2 images to
also extract the presence of the 'vmstate' section.

This requires conversion of the snapshot list to a hash table, as we
always know the name of the snapshot that we're looking for, and the
hash table also allows storing additional data, which we'll use to
record the presence of the 'vmstate'.

Signed-off-by: Peter Krempa <pkrempa@redhat.com>
---
 src/qemu/qemu_monitor.h           | 12 ++++++++++--
 src/qemu/qemu_monitor_json.c      | 18 ++++++++++++++----
 src/qemu/qemu_snapshot.c          |  6 ++----
 tests/qemublocktest.c             | 14 +++++++++++---
 .../bitmap/snapshots-internal.out |  2 +-
 5 files changed, 38 insertions(+), 14 deletions(-)

diff --git a/src/qemu/qemu_monitor.h b/src/qemu/qemu_monitor.h
index aded771315..89a59dfd27 100644
--- a/src/qemu/qemu_monitor.h
+++ b/src/qemu/qemu_monitor.h
@@ -701,6 +701,13 @@ struct _qemuBlockNamedNodeDataBitmap {
     unsigned long long granularity;
 };
 
+
+typedef struct _qemuBlockNamedNodeDataSnapshot qemuBlockNamedNodeDataSnapshot;
+struct _qemuBlockNamedNodeDataSnapshot {
+    bool vmstate;
+};
+
+
 typedef struct _qemuBlockNamedNodeData qemuBlockNamedNodeData;
 struct _qemuBlockNamedNodeData {
     unsigned long long capacity;
@@ -709,8 +716,9 @@ struct _qemuBlockNamedNodeData {
     qemuBlockNamedNodeDataBitmap **bitmaps;
     size_t nbitmaps;
 
-    /* NULL terminated string list of internal snapshot names */
-    char **snapshots;
+    /* hash table indexed by snapshot name containing data about snapshots
+     * (qemuBlockNamedNodeDataSnapshot) */
+    GHashTable *snapshots;
 
     /* the cluster size of the image is valid only when > 0 */
     unsigned long long clusterSize;
diff --git a/src/qemu/qemu_monitor_json.c b/src/qemu/qemu_monitor_json.c
index b3924461a9..1b4288b744 100644
--- a/src/qemu/qemu_monitor_json.c
+++ b/src/qemu/qemu_monitor_json.c
@@ -2569,7 +2569,7 @@ qemuMonitorJSONBlockNamedNodeDataFree(qemuBlockNamedNodeData *data)
     for (i = 0; i < data->nbitmaps; i++)
         qemuMonitorJSONBlockNamedNodeDataBitmapFree(data->bitmaps[i]);
-    g_strfreev(data->snapshots);
+    g_clear_pointer(&data->snapshots, g_hash_table_unref);
     g_free(data->bitmaps);
     g_free(data);
 }
@@ -2658,19 +2658,29 @@ qemuMonitorJSONBlockGetNamedNodeDataWorker(size_t pos G_GNUC_UNUSED,
 
     if ((snapshots = virJSONValueObjectGetArray(img, "snapshots"))) {
         size_t nsnapshots = virJSONValueArraySize(snapshots);
-        size_t nsnapnames = 0;
         size_t i;
 
-        ent->snapshots = g_new0(char *, nsnapshots + 1);
+        ent->snapshots = virHashNew(g_free);
 
         for (i = 0; i < nsnapshots; i++) {
             virJSONValue *snapshot = virJSONValueArrayGet(snapshots, i);
             const char *name = virJSONValueObjectGetString(snapshot, "name");
+            unsigned long long vmstate_size = 0;
+            qemuBlockNamedNodeDataSnapshot *snd;
 
             if (!name)
                 continue;
 
-            ent->snapshots[nsnapnames++] = g_strdup(name);
+            ignore_value(virJSONValueObjectGetNumberUlong(snapshot,
+                                                          "vm-state-size",
+                                                          &vmstate_size));
+
+            snd = g_new0(qemuBlockNamedNodeDataSnapshot, 1);
+
+            if (vmstate_size > 0)
+                snd->vmstate = true;
+
+            g_hash_table_insert(ent->snapshots, g_strdup(name), snd);
         }
     }
 
diff --git a/src/qemu/qemu_snapshot.c b/src/qemu/qemu_snapshot.c
index 4a17935627..aab06a09c6 100644
--- a/src/qemu/qemu_snapshot.c
+++ b/src/qemu/qemu_snapshot.c
@@ -3897,8 +3897,7 @@ qemuSnapshotActiveInternalDeleteGetDevices(virDomainObj *vm,
             continue;
 
         /* there might be no snapshot for given disk with given name */
-        if (!d->snapshots ||
-            !g_strv_contains((const char **) d->snapshots, snapname))
+        if (!virHashHasEntry(d->snapshots, snapname))
             continue;
 
         devices[ndevs++] = g_strdup(format_nodename);
@@ -3913,8 +3912,7 @@ qemuSnapshotActiveInternalDeleteGetDevices(virDomainObj *vm,
 
         if ((format_nodename = qemuBlockStorageSourceGetFormatNodename(vm->def->os.loader->nvram)) &&
             (d = virHashLookup(blockNamedNodeData, format_nodename)) &&
-            d->snapshots &&
-            g_strv_contains((const char **) d->snapshots, snapname)) {
+            virHashHasEntry(d->snapshots, snapname)) {
             devices[ndevs++] = g_strdup(format_nodename);
         }
     }
diff --git a/tests/qemublocktest.c b/tests/qemublocktest.c
index ac4d87b527..be3e421ac0 100644
--- a/tests/qemublocktest.c
+++ b/tests/qemublocktest.c
@@ -598,12 +598,20 @@ testQemuDetectBitmapsWorker(GHashTable *nodedata,
     }
 
     if (data->snapshots) {
-        char **sn;
+        g_autofree virHashKeyValuePair *snaps = virHashGetItems(data->snapshots, NULL, true);
+        virHashKeyValuePair *n;
 
         virBufferAddLit(buf, "internal snapshots:");
 
-        for (sn = data->snapshots; *sn; sn++)
-            virBufferAsprintf(buf, " '%s'", *sn);
+        for (n = snaps; n->key; n++) {
+            const qemuBlockNamedNodeDataSnapshot *d = n->value;
+            const char *vms = "";
+
+            if (d->vmstate)
+                vms = "(*)";
+
+            virBufferAsprintf(buf, " '%s'%s", (const char *) n->key, vms);
+        }
     }
 
     virBufferAdjustIndent(buf, -1);
diff --git a/tests/qemublocktestdata/bitmap/snapshots-internal.out b/tests/qemublocktestdata/bitmap/snapshots-internal.out
index f2fb0a1dcc..dbb3cfded4 100644
--- a/tests/qemublocktestdata/bitmap/snapshots-internal.out
+++ b/tests/qemublocktestdata/bitmap/snapshots-internal.out
@@ -1,2 +1,2 @@
 libvirt-1-format:
-  internal snapshots: '1727868651' '1727872064'
+  internal snapshots: '1727868651'(*) '1727872064'(*)
-- 
2.47.0
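[Editorial note: the extracted information comes from the 'image' member of qemu's query-named-block-nodes reply. A trimmed, hypothetical example of the JSON being parsed — an entry whose 'vm-state-size' is non-zero is marked as holding the vmstate, rendered as '(*)' in the test output above:

  "image": {
    "filename": "/var/lib/libvirt/images/vm.qcow2",
    "format": "qcow2",
    "snapshots": [
      { "id": "1", "name": "1727868651", "vm-state-size": 277479424,
        "date-sec": 1727868651, "date-nsec": 0,
        "vm-clock-sec": 25, "vm-clock-nsec": 0 }
    ]
  }
]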

[PATCH 9/9] qemu: Avoid use of '-loadvm' commandline argument for internal snapshot reversion

The '-loadvm' commandline parameter has exactly the same semantics as
the HMP 'loadvm' command. This includes the selection of which block
device is considered to contain the 'vmstate' section.

Since libvirt recently switched to the new QMP commands which allow a
free selection of where the 'vmstate' is placed, snapshot reversion
will no longer work if libvirt's algorithm disagrees with qemu's. This
is the case when the VM has a UEFI NVRAM image in qcow2 format.

To solve this we'll use the QMP counterpart 'snapshot-load' to load the
snapshot instead of using '-loadvm'. We'll do this before resuming the
processors after startup of qemu, so the behaviour is identical to what
we had before.

The logic for selecting the images now checks both the snapshot
metadata and the VM definition: if an image that is not covered by the
snapshot metadata does contain the snapshot, it is included in the
reversion, but a snapshot missing from a disk that is covered by the
snapshot metadata is a fatal error. The vmstate is selected based on
where it's actually present, as libvirt doesn't store this information.

Signed-off-by: Peter Krempa <pkrempa@redhat.com>
---
 src/qemu/qemu_command.c  |   5 +-
 src/qemu/qemu_process.c  |   7 ++
 src/qemu/qemu_snapshot.c | 232 +++++++++++++++++++++++++++++++++
 src/qemu/qemu_snapshot.h |   5 +
 4 files changed, 248 insertions(+), 1 deletion(-)

diff --git a/src/qemu/qemu_command.c b/src/qemu/qemu_command.c
index 696f891b47..f4430275dc 100644
--- a/src/qemu/qemu_command.c
+++ b/src/qemu/qemu_command.c
@@ -10567,7 +10567,10 @@ qemuBuildCommandLine(virDomainObj *vm,
     if (qemuBuildSecCommandLine(vm, cmd, def->sec) < 0)
         return NULL;
 
-    if (snapshot)
+    /* Internal snapshot reversion happens via QMP command after startup if
+     * supported */
+    if (snapshot &&
+        !virQEMUCapsGet(qemuCaps, QEMU_CAPS_SNAPSHOT_INTERNAL_QMP))
         virCommandAddArgList(cmd, "-loadvm", snapshot->def->name, NULL);
 
     if (def->namespaceData) {
diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
index 0815bffe3c..1fda8bdd65 100644
--- a/src/qemu/qemu_process.c
+++ b/src/qemu/qemu_process.c
@@ -7944,6 +7944,13 @@ qemuProcessLaunch(virConnectPtr conn,
 
     qemuDomainVcpuPersistOrder(vm->def);
 
+    if (snapshot &&
+        virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_SNAPSHOT_INTERNAL_QMP)) {
+        VIR_DEBUG("reverting internal snapshot via QMP");
+        if (qemuSnapshotInternalRevert(vm, snapshot, asyncJob) < 0)
+            goto cleanup;
+    }
+
     VIR_DEBUG("Verifying and updating provided guest CPU");
     if (qemuProcessUpdateAndVerifyCPU(vm, asyncJob) < 0)
         goto cleanup;
diff --git a/src/qemu/qemu_snapshot.c b/src/qemu/qemu_snapshot.c
index aab06a09c6..5b3aadcbf0 100644
--- a/src/qemu/qemu_snapshot.c
+++ b/src/qemu/qemu_snapshot.c
@@ -4361,3 +4361,235 @@ qemuSnapshotDelete(virDomainObj *vm,
 
     return ret;
 }
+
+
+static char **
+qemuSnapshotInternalRevertGetDevices(virDomainObj *vm,
+                                     virDomainSnapshotDef *snapdef,
+                                     char **vmstate,
+                                     virDomainAsyncJob asyncJob)
+
+{
+    g_autoptr(GHashTable) blockNamedNodeData = NULL;
+    const char *snapname = snapdef->parent.name;
+    g_auto(GStrv) devices = g_new0(char *, vm->def->ndisks + 2);
+    size_t ndevs = 0;
+    size_t i = 0;
+    const char *vmstate_candidate = NULL;
+    g_autoptr(GHashTable) snapdisks = virHashNew(NULL);
+    /* following variables add debug information */
+    g_auto(virBuffer) errExtraSnap = VIR_BUFFER_INITIALIZER;
+    g_auto(virBuffer) errSnapWithoutMetadata = VIR_BUFFER_INITIALIZER;
+
+    if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, asyncJob)))
+        return NULL;
+
+    /* Look up snapshot data from the snapshot object config itself */
+    for (i = 0; i < snapdef->ndisks; i++) {
+        virDomainSnapshotDiskDef *snapdisk = snapdef->disks + i;
+        virDomainDiskDef *domdisk = virDomainDiskByTarget(vm->def, snapdisk->name);
+        const char *format_nodename;
+        qemuBlockNamedNodeData *d = NULL;
+        qemuBlockNamedNodeDataSnapshot *sn = NULL;
+
+        if (!domdisk) {
+            /* This can happen only if the snapshot metadata doesn't match the
+             * domain XML stored in the snapshot */
+            virReportError(VIR_ERR_INTERNAL_ERROR,
+                           _("VM doesn't have disk '%1$s' referenced by snapshot '%2$s'"),
+                           snapdisk->name, snapname);
+            return NULL;
+        }
+
+        /* later we'll check if all disks from the VM definition XML were considered */
+        g_hash_table_insert(snapdisks, g_strdup(snapdisk->name), NULL);
+
+        format_nodename = qemuBlockStorageSourceGetFormatNodename(domdisk->src);
+
+        /* Internal snapshots require image format which supports them, thus
+         * this effectively rejects any raw images */
+        if (format_nodename)
+            d = g_hash_table_lookup(blockNamedNodeData, format_nodename);
+
+        if (d && d->snapshots)
+            sn = g_hash_table_lookup(d->snapshots, snapname);
+
+        if (sn) {
+            if (sn->vmstate) {
+                if (vmstate_candidate) {
+                    virReportError(VIR_ERR_OPERATION_FAILED,
+                                   _("two disk images contain vm state section for internal snapshot '%1$s'"),
+                                   snapname);
+                    return NULL;
+                }
+                vmstate_candidate = format_nodename;
+            }
+
+            devices[ndevs++] = g_strdup(format_nodename);
+        }
+
+        switch (snapdisk->snapshot) {
+        case VIR_DOMAIN_SNAPSHOT_LOCATION_INTERNAL:
+            if (!sn) {
+                virReportError(VIR_ERR_OPERATION_FAILED,
+                               _("image of disk '%1$s' does not have internal snapshot '%2$s'"),
+                               snapdisk->name, snapname);
+                return NULL;
+            }
+
+            break;
+
+        case VIR_DOMAIN_SNAPSHOT_LOCATION_EXTERNAL:
+        case VIR_DOMAIN_SNAPSHOT_LOCATION_MANUAL:
+        case VIR_DOMAIN_SNAPSHOT_LOCATION_NO:
+        case VIR_DOMAIN_SNAPSHOT_LOCATION_DEFAULT:
+        case VIR_DOMAIN_SNAPSHOT_LOCATION_LAST:
+            if (sn) {
+                /* Unexpected internal snapshot present in image even if the
+                 * snapshot metadata claims otherwise */
+                virBufferAsprintf(&errExtraSnap, "%s ", snapdisk->name);
+            }
+            break;
+        }
+    }
+
+    /* check if all VM disks were covered */
+    for (i = 0; i < vm->def->ndisks; i++) {
+        virDomainDiskDef *domdisk = vm->def->disks[i];
+        const char *format_nodename;
+        qemuBlockNamedNodeData *d = NULL;
+        qemuBlockNamedNodeDataSnapshot *sn = NULL;
+
+        if (g_hash_table_contains(snapdisks, domdisk->dst))
+            continue;
+
+        format_nodename = qemuBlockStorageSourceGetFormatNodename(domdisk->src);
+
+        if (format_nodename)
+            d = g_hash_table_lookup(blockNamedNodeData, format_nodename);
+
+        if (d && d->snapshots)
+            sn = g_hash_table_lookup(d->snapshots, snapname);
+
+        if (sn) {
+            virBufferAsprintf(&errSnapWithoutMetadata, "%s ", domdisk->dst);
+
+            if (sn->vmstate) {
+                if (vmstate_candidate) {
+                    virReportError(VIR_ERR_OPERATION_FAILED,
+                                   _("two disk images contain vm state section for internal snapshot '%1$s'"),
+                                   snapname);
+                    return NULL;
+                }
+                vmstate_candidate = format_nodename;
+            }
+
+            devices[ndevs++] = g_strdup(format_nodename);
+        }
+    }
+
+    /* pflash */
+    if (vm->def->os.loader &&
+        vm->def->os.loader->nvram &&
+        vm->def->os.loader->nvram->format == VIR_STORAGE_FILE_QCOW2) {
+        const char *format_nodename;
+        qemuBlockNamedNodeData *d = NULL;
+        qemuBlockNamedNodeDataSnapshot *sn = NULL;
+
+        if ((format_nodename = qemuBlockStorageSourceGetFormatNodename(vm->def->os.loader->nvram)) &&
+            (d = virHashLookup(blockNamedNodeData, format_nodename)) &&
+            d->snapshots &&
+            (sn = g_hash_table_lookup(d->snapshots, snapname))) {
+            if (sn->vmstate) {
+                if (vmstate_candidate) {
+                    virReportError(VIR_ERR_OPERATION_FAILED,
+                                   _("two disk images contain vm state section for internal snapshot '%1$s'"),
+                                   snapname);
+                    return NULL;
+                }
+
+                vmstate_candidate = format_nodename;
+            }
+
+            devices[ndevs++] = g_strdup(format_nodename);
+        }
+    }
+
+    if (virBufferUse(&errExtraSnap) > 0 ||
+        virBufferUse(&errSnapWithoutMetadata) > 0) {
+        VIR_WARN("inconsistent internal snapshot state (reversion): VM='%s' snapshot='%s' no-snapshot='%s' no-metadata='%s'",
+                 vm->def->name, snapname,
+                 virBufferCurrentContent(&errExtraSnap),
+                 virBufferCurrentContent(&errSnapWithoutMetadata));
+    }
+
+    *vmstate = g_strdup(vmstate_candidate);
+    return g_steal_pointer(&devices);
+}
+
+
+int
+qemuSnapshotInternalRevert(virDomainObj *vm,
+                           virDomainMomentObj *snapshot,
+                           virDomainAsyncJob asyncJob)
+{
+    virDomainSnapshotDef *snapdef = virDomainSnapshotObjGetDef(snapshot);
+    g_autofree char *jobname = g_strdup_printf("internal-snapshot-load-%s", snapdef->parent.name);
+    qemuBlockJobData *job = NULL;
+    g_auto(GStrv) devices = NULL;
+    g_autofree char *vmstate = NULL;
+    int rc = 0;
+    int ret = -1;
+
+    if (!(devices = qemuSnapshotInternalRevertGetDevices(vm, snapdef, &vmstate, asyncJob)))
+        return -1;
+
+    if (!vmstate) {
+        virReportError(VIR_ERR_OPERATION_FAILED,
+                       _("missing vmstate section when reverting active internal snapshot '%1$s'"),
+                       snapshot->def->name);
+        return -1;
+    }
+
+    if (!(job = qemuBlockJobDiskNew(vm, NULL, QEMU_BLOCKJOB_TYPE_SNAPSHOT_LOAD,
+                                    jobname)))
+        return -1;
+
+    qemuBlockJobSyncBegin(job);
+
+    if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0)
+        goto cleanup;
+
+    rc = qemuMonitorSnapshotLoad(qemuDomainGetMonitor(vm), jobname, snapdef->parent.name,
+                                 vmstate, (const char **) devices);
+    qemuDomainObjExitMonitor(vm);
+
+    if (rc < 0)
+        goto cleanup;
+
+    qemuBlockJobStarted(job, vm);
+
+    while (true) {
+        qemuBlockJobUpdate(vm, job, asyncJob);
+
+        if (job->state == VIR_DOMAIN_BLOCK_JOB_FAILED) {
+            virReportError(VIR_ERR_OPERATION_FAILED,
+                           _("load of internal snapshot '%1$s' job failed: %2$s"),
+                           snapdef->parent.name, NULLSTR(job->errmsg));
+            goto cleanup;
+        }
+
+        if (job->state == VIR_DOMAIN_BLOCK_JOB_COMPLETED)
+            break;
+
+        if (qemuDomainObjWait(vm) < 0)
+            goto cleanup;
+    }
+
+    ret = 0;
+
+ cleanup:
+    qemuBlockJobStartupFinalize(vm, job);
+
+    return ret;
+}
diff --git a/src/qemu/qemu_snapshot.h b/src/qemu/qemu_snapshot.h
index 38437a2fd7..f38c2acfb3 100644
--- a/src/qemu/qemu_snapshot.h
+++ b/src/qemu/qemu_snapshot.h
@@ -84,3 +84,8 @@ qemuSnapshotDiskCreate(qemuSnapshotDiskContext *snapctxt);
 virDomainSnapshotDiskDef *
 qemuSnapshotGetTransientDiskDef(virDomainDiskDef *domdisk,
                                 const char *suffix);
+
+int
+qemuSnapshotInternalRevert(virDomainObj *vm,
+                           virDomainMomentObj *snapshot,
+                           virDomainAsyncJob asyncJob);
-- 
2.47.0
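[Editorial note: the vmstate selection in the patch above mirrors what is visible in the image itself; only the image whose internal snapshot carries a non-zero VM state size holds the vmstate section. This corresponds to the VM SIZE column of 'qemu-img snapshot -l'. Hypothetical output; exact columns vary between qemu versions:

  $ qemu-img snapshot -l /var/lib/libvirt/images/vm.qcow2
  Snapshot list:
  ID  TAG         VM SIZE                 DATE  VM CLOCK
  1   1727868651  265 MiB  2024-10-02 13:30:51  00:00:25.000
]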

On 11/15/24 09:39, Peter Krempa wrote:
> This series fixes two things:
>
>  - inactive snapshot handling with NVRAM image
>  - use of '-loadvm' commandline option to revert snapshots
>
> Both are individually described.
>
> Peter Krempa (9):
>   qemu: Don't store path to qemu img
>   qemuDomainSnapshotForEachQcow2Raw: Remove 'driver' argument
>   qemu: Move 'qemuDomainSnapshotForEachQcow2(Raw)' to qemu_snapshot.c
>   qemuSnapshotForEachQcow2: Refactor
>   qemuSnapshotForEachQcow2: Handle also NVRAM image for internal snapshots
>   qemu: monitor: Add monitor infrastructure for 'snapshot-load' QMP command
>   qemu: Add enum entries for 'snapshot-load' qemu job
>   qemu: monitor: Extract vmstate presence for internal snapshots in qemuBlockGetNamedNodeData
>   qemu: Avoid use of '-loadvm' commandline argument for internal snapshot reversion
>
>  src/qemu/qemu_block.c             |   1 +
>  src/qemu/qemu_blockjob.c          |   2 +
>  src/qemu/qemu_blockjob.h          |   1 +
>  src/qemu/qemu_command.c           |   5 +-
>  src/qemu/qemu_conf.h              |   3 -
>  src/qemu/qemu_domain.c            | 106 +----
>  src/qemu/qemu_domain.h            |   8 -
>  src/qemu/qemu_driver.c            |   3 -
>  src/qemu/qemu_monitor.c           |  16 +
>  src/qemu/qemu_monitor.h           |  19 +-
>  src/qemu/qemu_monitor_json.c      |  49 +-
>  src/qemu/qemu_monitor_json.h      |   7 +
>  src/qemu/qemu_process.c           |   7 +
>  src/qemu/qemu_snapshot.c          | 437 ++++++++++++++++--
>  src/qemu/qemu_snapshot.h          |   5 +
>  tests/qemublocktest.c             |  14 +-
>  .../bitmap/snapshots-internal.out |   2 +-
>  17 files changed, 511 insertions(+), 174 deletions(-)

Reviewed-by: Michal Privoznik <mprivozn@redhat.com>

Michal