[PATCH 0/4] Rework qemu internal active snapshots to use QMP

These patches make use of the recently added QMP snapshot-save/delete commands and reap the HMP savevm/delvm code paths. The usage of HMP commands is highly discouraged by QEMU.

Nikolai Barybin (4):
  qemu monitor: add snapshot-save/delete QMP commands
  qemu blockjob: add snapshot-save/delete job types
  qemu snapshot: use QMP snapshot-save/delete for internal snapshots
  qemu monitor: reap qemu_monitor_text

 po/POTFILES                  |   1 -
 po/libvirt.pot               |  18 ----
 src/qemu/meson.build         |   1 -
 src/qemu/qemu_block.c        |   2 +
 src/qemu/qemu_blockjob.c     |   6 +-
 src/qemu/qemu_blockjob.h     |   2 +
 src/qemu/qemu_domain.c       |   4 +
 src/qemu/qemu_monitor.c      |  23 +++--
 src/qemu/qemu_monitor.h      |  16 +++-
 src/qemu/qemu_monitor_json.c |  66 +++++++++++++++
 src/qemu/qemu_monitor_json.h |  13 +++
 src/qemu/qemu_monitor_text.c |  88 -------------------
 src/qemu/qemu_monitor_text.h |  29 -------
 src/qemu/qemu_snapshot.c     | 158 ++++++++++++++++++++++++++++++++---
 14 files changed, 267 insertions(+), 160 deletions(-)
 delete mode 100644 src/qemu/qemu_monitor_text.c
 delete mode 100644 src/qemu/qemu_monitor_text.h

-- 
2.43.5
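In practical terms the series replaces the synchronous HMP-backed monitor helper with a job-based QMP helper. A condensed sketch of the before/after shape of the caller (not part of the series itself; names taken from the patches below):

    /* Before: HMP 'savevm' issued through the text-monitor wrapper,
     * holding the monitor for the whole duration of the snapshot. */
    ret = qemuMonitorCreateSnapshot(priv->mon, snap->def->name);

    /* After: QMP 'snapshot-save' starts a background job. wrdevs is a
     * NULL-terminated list of writable device node names; wrdevs[0] is
     * the device that also receives the VM state. Completion is tracked
     * through the block-job machinery added in patch 2. */
    rc = qemuMonitorSnapshotSave(qemuDomainGetMonitor(vm), job->name,
                                 snap->def->name, wrdevs[0], wrdevs);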

Signed-off-by: Nikolai Barybin <nikolai.barybin@virtuozzo.com>
---
 src/qemu/qemu_monitor.c      | 30 ++++++++++++++++
 src/qemu/qemu_monitor.h      | 13 +++++++
 src/qemu/qemu_monitor_json.c | 66 ++++++++++++++++++++++++++++++++++++
 src/qemu/qemu_monitor_json.h | 13 +++++++
 4 files changed, 122 insertions(+)

diff --git a/src/qemu/qemu_monitor.c b/src/qemu/qemu_monitor.c
index b1c0c6a064..53f5ecf223 100644
--- a/src/qemu/qemu_monitor.c
+++ b/src/qemu/qemu_monitor.c
@@ -2763,6 +2763,36 @@ qemuMonitorDeleteSnapshot(qemuMonitor *mon, const char *name)
 }
 
 
+int
+qemuMonitorSnapshotSave(qemuMonitor *mon,
+                        const char *jobname,
+                        const char *snapshotName,
+                        const char *vmstateDev,
+                        char **wrdevs)
+{
+    VIR_DEBUG("jobname=%s, snapshotName=%s, vmstateDev=%s, wrdevs=%p",
+              jobname, snapshotName, vmstateDev, wrdevs);
+
+    QEMU_CHECK_MONITOR(mon);
+
+    return qemuMonitorJSONSnapshotSave(mon, jobname, snapshotName, vmstateDev, wrdevs);
+}
+
+
+int
+qemuMonitorSnapshotDelete(qemuMonitor *mon,
+                          const char *jobname,
+                          const char *snapshotName,
+                          char **wrdevs)
+{
+    VIR_DEBUG("jobname=%s, snapshotName=%s, wrdevs=%p", jobname, snapshotName, wrdevs);
+
+    QEMU_CHECK_MONITOR(mon);
+
+    return qemuMonitorJSONSnapshotDelete(mon, jobname, snapshotName, wrdevs);
+}
+
+
 int
 qemuMonitorBlockdevMirror(qemuMonitor *mon,
                           const char *jobname,
diff --git a/src/qemu/qemu_monitor.h b/src/qemu/qemu_monitor.h
index 76c859a888..27dbb78e06 100644
--- a/src/qemu/qemu_monitor.h
+++ b/src/qemu/qemu_monitor.h
@@ -1617,3 +1617,16 @@ int
 qemuMonitorDisplayReload(qemuMonitor *mon,
                          const char *type,
                          bool tlsCerts);
+
+int
+qemuMonitorSnapshotSave(qemuMonitor *mon,
+                        const char *jobname,
+                        const char *snapshotName,
+                        const char *vmstateDev,
+                        char **wrdevs);
+
+int
+qemuMonitorSnapshotDelete(qemuMonitor *mon,
+                          const char *jobname,
+                          const char *snapshotName,
+                          char **wrdevs);
diff --git a/src/qemu/qemu_monitor_json.c b/src/qemu/qemu_monitor_json.c
index 8a20ce57e6..312a4aee04 100644
--- a/src/qemu/qemu_monitor_json.c
+++ b/src/qemu/qemu_monitor_json.c
@@ -8955,3 +8955,69 @@ int qemuMonitorJSONDisplayReload(qemuMonitor *mon,
 
     return 0;
 }
+
+int
+qemuMonitorJSONSnapshotSave(qemuMonitor *mon,
+                            const char *jobname,
+                            const char *snapshotName,
+                            const char *vmstateDev,
+                            char **wrdevs)
+{
+    g_autoptr(virJSONValue) cmd = NULL;
+    g_autoptr(virJSONValue) reply = NULL;
+    g_autoptr(virJSONValue) wrdev_list = NULL;
+    size_t i = 0;
+
+    if (wrdevs) {
+        wrdev_list = virJSONValueNewArray();
+
+        for (i = 0; wrdevs[i]; i++)
+            if (virJSONValueArrayAppendString(wrdev_list, wrdevs[i]) < 0)
+                return -1;
+    }
+
+    if (!(cmd = qemuMonitorJSONMakeCommand("snapshot-save",
+                                           "s:job-id", jobname,
+                                           "s:tag", snapshotName,
+                                           "s:vmstate", vmstateDev,
+                                           "A:devices", &wrdev_list,
+                                           NULL)))
+        return -1;
+
+    if (qemuMonitorJSONCommand(mon, cmd, &reply) < 0)
+        return -1;
+
+    return qemuMonitorJSONCheckError(cmd, reply);
+}
+
+int
+qemuMonitorJSONSnapshotDelete(qemuMonitor *mon,
+                              const char *jobname,
+                              const char *snapshotName,
+                              char **wrdevs)
+{
+    g_autoptr(virJSONValue) cmd = NULL;
+    g_autoptr(virJSONValue) reply = NULL;
+    g_autoptr(virJSONValue) wrdev_list = NULL;
+    size_t i = 0;
+
+    if (wrdevs) {
+        wrdev_list = virJSONValueNewArray();
+
+        for (i = 0; wrdevs[i]; i++)
+            if (virJSONValueArrayAppendString(wrdev_list, wrdevs[i]) < 0)
+                return -1;
+    }
+
+    if (!(cmd = qemuMonitorJSONMakeCommand("snapshot-delete",
+                                           "s:job-id", jobname,
+                                           "s:tag", snapshotName,
+                                           "A:devices", &wrdev_list,
+                                           NULL)))
+        return -1;
+
+    if (qemuMonitorJSONCommand(mon, cmd, &reply) < 0)
+        return -1;
+
+    return qemuMonitorJSONCheckError(cmd, reply);
+}
diff --git a/src/qemu/qemu_monitor_json.h b/src/qemu/qemu_monitor_json.h
index 921dd34ed2..0245cd54fc 100644
--- a/src/qemu/qemu_monitor_json.h
+++ b/src/qemu/qemu_monitor_json.h
@@ -825,3 +825,16 @@ qemuMonitorJSONQueryStats(qemuMonitor *mon,
 int qemuMonitorJSONDisplayReload(qemuMonitor *mon,
                                  const char *type,
                                  bool tlsCerts);
+
+int
+qemuMonitorJSONSnapshotSave(qemuMonitor *mon,
+                            const char *jobname,
+                            const char *snapshotName,
+                            const char *vmstateDev,
+                            char **wrdevs);
+
+int
+qemuMonitorJSONSnapshotDelete(qemuMonitor *mon,
+                              const char *jobname,
+                              const char *snapshotName,
+                              char **wrdevs);
-- 
2.43.5
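The "s:"/"A:" specifiers follow the existing qemuMonitorJSONMakeCommand() conventions (mandatory string member, optional array), so the command emitted on the wire ends up roughly as follows; the node names here are purely illustrative:

    /* QMP generated by qemuMonitorJSONSnapshotSave() for job "snapsave1",
     * tag "snap1", with the VM state placed on node "libvirt-1-format":
     *
     *   { "execute": "snapshot-save",
     *     "arguments": { "job-id": "snapsave1",
     *                    "tag": "snap1",
     *                    "vmstate": "libvirt-1-format",
     *                    "devices": [ "libvirt-1-format",
     *                                 "libvirt-2-format" ] } }
     *
     * The reply only acknowledges that the job was created; progress and
     * completion arrive asynchronously as JOB_STATUS_CHANGE events, which
     * is why the callers added later in the series poll a block job rather
     * than treating the command reply as the final result. */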

Signed-off-by: Nikolai Barybin <nikolai.barybin@virtuozzo.com>
---
 src/qemu/qemu_block.c    | 2 ++
 src/qemu/qemu_blockjob.c | 6 +++++-
 src/qemu/qemu_blockjob.h | 2 ++
 src/qemu/qemu_domain.c   | 4 ++++
 4 files changed, 13 insertions(+), 1 deletion(-)

diff --git a/src/qemu/qemu_block.c b/src/qemu/qemu_block.c
index d6cdf521c4..414803cff2 100644
--- a/src/qemu/qemu_block.c
+++ b/src/qemu/qemu_block.c
@@ -3766,6 +3766,8 @@ qemuBlockPivot(virDomainObj *vm,
     case QEMU_BLOCKJOB_TYPE_INTERNAL:
     case QEMU_BLOCKJOB_TYPE_CREATE:
     case QEMU_BLOCKJOB_TYPE_BROKEN:
+    case QEMU_BLOCKJOB_TYPE_SNAPSHOT_SAVE:
+    case QEMU_BLOCKJOB_TYPE_SNAPSHOT_DELETE:
         virReportError(VIR_ERR_OPERATION_INVALID,
                        _("job type '%1$s' does not support pivot"),
                        qemuBlockjobTypeToString(job->type));
diff --git a/src/qemu/qemu_blockjob.c b/src/qemu/qemu_blockjob.c
index 42856df6d4..0d4fdd64ca 100644
--- a/src/qemu/qemu_blockjob.c
+++ b/src/qemu/qemu_blockjob.c
@@ -68,7 +68,9 @@ VIR_ENUM_IMPL(qemuBlockjob,
               "backup",
               "",
               "create",
-              "broken");
+              "broken",
+              "snapshot-save",
+              "snapshot-delete");
 
 
 static virClass *qemuBlockJobDataClass;
@@ -1450,6 +1452,8 @@ qemuBlockJobEventProcessConcludedTransition(qemuBlockJobData *job,
         break;
 
     case QEMU_BLOCKJOB_TYPE_BROKEN:
+    case QEMU_BLOCKJOB_TYPE_SNAPSHOT_SAVE:
+    case QEMU_BLOCKJOB_TYPE_SNAPSHOT_DELETE:
     case QEMU_BLOCKJOB_TYPE_NONE:
     case QEMU_BLOCKJOB_TYPE_INTERNAL:
     case QEMU_BLOCKJOB_TYPE_LAST:
diff --git a/src/qemu/qemu_blockjob.h b/src/qemu/qemu_blockjob.h
index f1ac43b4c7..d4a3677050 100644
--- a/src/qemu/qemu_blockjob.h
+++ b/src/qemu/qemu_blockjob.h
@@ -66,6 +66,8 @@ typedef enum {
     QEMU_BLOCKJOB_TYPE_INTERNAL,
     QEMU_BLOCKJOB_TYPE_CREATE,
     QEMU_BLOCKJOB_TYPE_BROKEN,
+    QEMU_BLOCKJOB_TYPE_SNAPSHOT_SAVE,
+    QEMU_BLOCKJOB_TYPE_SNAPSHOT_DELETE,
     QEMU_BLOCKJOB_TYPE_LAST
 } qemuBlockJobType;
 G_STATIC_ASSERT((int)QEMU_BLOCKJOB_TYPE_INTERNAL == VIR_DOMAIN_BLOCK_JOB_TYPE_LAST);
diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
index 2134b11038..f512921187 100644
--- a/src/qemu/qemu_domain.c
+++ b/src/qemu/qemu_domain.c
@@ -2407,6 +2407,8 @@ qemuDomainObjPrivateXMLFormatBlockjobIterator(void *payload,
         break;
 
     case QEMU_BLOCKJOB_TYPE_BROKEN:
+    case QEMU_BLOCKJOB_TYPE_SNAPSHOT_SAVE:
+    case QEMU_BLOCKJOB_TYPE_SNAPSHOT_DELETE:
     case QEMU_BLOCKJOB_TYPE_NONE:
     case QEMU_BLOCKJOB_TYPE_INTERNAL:
     case QEMU_BLOCKJOB_TYPE_LAST:
@@ -2958,6 +2960,8 @@ qemuDomainObjPrivateXMLParseBlockjobDataSpecific(qemuBlockJobData *job,
         break;
 
     case QEMU_BLOCKJOB_TYPE_BROKEN:
+    case QEMU_BLOCKJOB_TYPE_SNAPSHOT_SAVE:
+    case QEMU_BLOCKJOB_TYPE_SNAPSHOT_DELETE:
     case QEMU_BLOCKJOB_TYPE_NONE:
     case QEMU_BLOCKJOB_TYPE_INTERNAL:
     case QEMU_BLOCKJOB_TYPE_LAST:
-- 
2.43.5
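The strings registered in VIR_ENUM_IMPL() above are what the job type round-trips through in the domain status XML, which is why the new values also have to be handled in the qemu_domain.c format/parse switches. Illustratively:

    /* Helpers generated by VIR_ENUM_DECL/VIR_ENUM_IMPL(qemuBlockjob, ...) */
    const char *typestr = qemuBlockjobTypeToString(QEMU_BLOCKJOB_TYPE_SNAPSHOT_SAVE);
    /* typestr == "snapshot-save"; qemuBlockjobTypeFromString("snapshot-save")
     * recovers QEMU_BLOCKJOB_TYPE_SNAPSHOT_SAVE when the blockjob element is
     * parsed back from the status XML after a libvirtd restart. */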

The usage of HMP commands is highly discouraged by QEMU. Moreover, the
current snapshot creation routine does not provide flexibility in
choosing the target device for the VM state snapshot.

This patch makes use of the QMP commands snapshot-save/delete and by
default chooses the first writable disk (if present) as the target for
the VM state, falling back to NVRAM otherwise.

Signed-off-by: Nikolai Barybin <nikolai.barybin@virtuozzo.com>
---
 src/qemu/qemu_snapshot.c | 158 ++++++++++++++++++++++++++++++++++++---
 1 file changed, 148 insertions(+), 10 deletions(-)

diff --git a/src/qemu/qemu_snapshot.c b/src/qemu/qemu_snapshot.c
index f5260c4a22..83949a9a27 100644
--- a/src/qemu/qemu_snapshot.c
+++ b/src/qemu/qemu_snapshot.c
@@ -308,6 +308,96 @@ qemuSnapshotCreateInactiveExternal(virQEMUDriver *driver,
     return ret;
 }
 
+static int
+qemuSnapshotActiveInternalGetWrdevListHelper(virDomainObj *vm,
+                                             char **wrdevs)
+{
+    size_t wrdevCount = 0;
+    size_t i = 0;
+
+    for (i = 0; i < vm->def->ndisks; i++) {
+        virDomainDiskDef *disk = vm->def->disks[i];
+        if (!disk->src->readonly) {
+            wrdevs[wrdevCount] = g_strdup(disk->src->nodenameformat);
+            wrdevCount++;
+        }
+    }
+
+    if (wrdevCount == 0) {
+        if (vm->def->os.loader->nvram) {
+            wrdevs[0] = g_strdup(vm->def->os.loader->nvram->nodenameformat);
+        } else {
+            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+                           _("no writable device for internal snapshot creation/deletion"));
+            return -1;
+        }
+    }
+
+    return 0;
+}
+
+
+static int
+qemuSnapshotCreateActiveInternalDone(virDomainObj *vm)
+{
+    qemuBlockJobData *job = NULL;
+    qemuDomainObjPrivate *priv = vm->privateData;
+
+    if (!(job = virHashLookup(priv->blockjobs, g_strdup_printf("snapsave%d", vm->def->id)))) {
+        virReportError(VIR_ERR_INTERNAL_ERROR,
+                       _("failed to lookup blockjob 'snapsave%1$d'"), vm->def->id);
+        return -1;
+    }
+
+    qemuBlockJobUpdate(vm, job, VIR_ASYNC_JOB_NONE);
+    if (job->state == VIR_DOMAIN_BLOCK_JOB_FAILED) {
+        virReportError(VIR_ERR_INTERNAL_ERROR,
+                       _("snapshot-save job failed: %1$s"), NULLSTR(job->errmsg));
+        return -1;
+    }
+
+    return job->state == VIR_DOMAIN_BLOCK_JOB_COMPLETED ? 1 : 0;
+}
+
+
+static int
+qemuSnapshotCreateActiveInternalStart(virDomainObj *vm,
+                                      const char *name)
+{
+    qemuBlockJobData *job = NULL;
+    g_autofree char** wrdevs = NULL;
+    int ret = -1;
+    int rc = 0;
+
+    wrdevs = g_new0(char *, vm->def->ndisks + 1);
+    if (qemuSnapshotActiveInternalGetWrdevListHelper(vm, wrdevs) < 0)
+        return -1;
+
+    if (!(job = qemuBlockJobDiskNew(vm, NULL, QEMU_BLOCKJOB_TYPE_SNAPSHOT_SAVE,
+                                    g_strdup_printf("snapsave%d", vm->def->id)))) {
+        virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("failed to create new blockjob"));
+        return -1;
+    }
+
+    qemuBlockJobSyncBegin(job);
+    if (qemuDomainObjEnterMonitorAsync(vm, VIR_ASYNC_JOB_SNAPSHOT) < 0) {
+        ret = -1;
+        goto cleanup;
+    }
+
+    rc = qemuMonitorSnapshotSave(qemuDomainGetMonitor(vm), job->name,
+                                 name, wrdevs[0], wrdevs);
+    qemuDomainObjExitMonitor(vm);
+    if (rc == 0) {
+        qemuBlockJobStarted(job, vm);
+        ret = 0;
+    }
+
+ cleanup:
+    qemuBlockJobStartupFinalize(vm, job);
+    return ret;
+}
+
 
 /* The domain is expected to be locked and active. */
 static int
@@ -316,11 +406,11 @@ qemuSnapshotCreateActiveInternal(virQEMUDriver *driver,
                                  virDomainMomentObj *snap,
                                  unsigned int flags)
 {
-    qemuDomainObjPrivate *priv = vm->privateData;
     virObjectEvent *event = NULL;
     bool resume = false;
     virDomainSnapshotDef *snapdef = virDomainSnapshotObjGetDef(snap);
     int ret = -1;
+    int rv = 0;
 
     if (!qemuMigrationSrcIsAllowed(vm, false, VIR_ASYNC_JOB_SNAPSHOT, 0))
         goto cleanup;
@@ -342,15 +432,17 @@ qemuSnapshotCreateActiveInternal(virQEMUDriver *driver,
         }
     }
 
-    if (qemuDomainObjEnterMonitorAsync(vm, VIR_ASYNC_JOB_SNAPSHOT) < 0) {
+    if ((ret = qemuSnapshotCreateActiveInternalStart(vm, snap->def->name)) < 0) {
         resume = false;
         goto cleanup;
     }
 
-    ret = qemuMonitorCreateSnapshot(priv->mon, snap->def->name);
-    qemuDomainObjExitMonitor(vm);
-    if (ret < 0)
-        goto cleanup;
+    while ((rv = qemuSnapshotCreateActiveInternalDone(vm)) != 1) {
+        if (rv < 0 || qemuDomainObjWait(vm) < 0) {
+            ret = -1;
+            goto cleanup;
+        }
+    }
 
     if (!(snapdef->cookie = (virObject *) qemuDomainSaveCookieNew(vm)))
         goto cleanup;
@@ -3603,6 +3695,55 @@ qemuSnapshotDiscardMetadata(virDomainObj *vm,
 }
 
 
+static int
+qemuSnapshotDiscardActiveInternal(virDomainObj *vm,
+                                  const char *name)
+{
+    qemuBlockJobData *job = NULL;
+    g_autofree char** wrdevs = NULL;
+    int ret = -1;
+    int rc = 0;
+
+    wrdevs = g_new0(char *, vm->def->ndisks + 1);
+    if (qemuSnapshotActiveInternalGetWrdevListHelper(vm, wrdevs) < 0)
+        return -1;
+
+    if (!(job = qemuBlockJobDiskNew(vm, NULL, QEMU_BLOCKJOB_TYPE_SNAPSHOT_DELETE,
+                                    g_strdup_printf("snapdelete%d", vm->def->id)))) {
+        virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("failed to create new blockjob"));
+        return -1;
+    }
+
+    qemuBlockJobSyncBegin(job);
+    if (qemuDomainObjEnterMonitorAsync(vm, VIR_ASYNC_JOB_SNAPSHOT) < 0)
+        goto cleanup;
+
+    rc = qemuMonitorSnapshotDelete(qemuDomainGetMonitor(vm), job->name, name, wrdevs);
+    qemuDomainObjExitMonitor(vm);
+    if (rc == 0) {
+        qemuBlockJobStarted(job, vm);
+        ret = 0;
+    } else {
+        goto cleanup;
+    }
+
+    while (job->state != VIR_DOMAIN_BLOCK_JOB_COMPLETED) {
+        qemuDomainObjWait(vm);
+        qemuBlockJobUpdate(vm, job, VIR_ASYNC_JOB_NONE);
+        if (job->state == VIR_DOMAIN_BLOCK_JOB_FAILED) {
+            virReportError(VIR_ERR_INTERNAL_ERROR,
+                           _("snapshot-delete job failed: %1$s"), NULLSTR(job->errmsg));
+            ret = -1;
+            break;
+        }
+    }
+
+ cleanup:
+    qemuBlockJobStartupFinalize(vm, job);
+    return ret;
+}
+
+
 /* Discard one snapshot (or its metadata), without reparenting any children.  */
 static int
 qemuSnapshotDiscardImpl(virQEMUDriver *driver,
@@ -3645,11 +3786,8 @@ qemuSnapshotDiscardImpl(virQEMUDriver *driver,
                 /* Similarly as internal snapshot creation we would use a regular job
                  * here so set a mask to forbid any other job. */
                 qemuDomainObjSetAsyncJobMask(vm, VIR_JOB_NONE);
-                if (qemuDomainObjEnterMonitorAsync(vm, VIR_ASYNC_JOB_SNAPSHOT) < 0)
-                    return -1;
                 /* we continue on even in the face of error */
-                qemuMonitorDeleteSnapshot(qemuDomainGetMonitor(vm), snap->def->name);
-                qemuDomainObjExitMonitor(vm);
+                qemuSnapshotDiscardActiveInternal(vm, snap->def->name);
                 qemuDomainObjSetAsyncJobMask(vm, VIR_JOB_DEFAULT_MASK);
             }
         }
-- 
2.43.5

On 7/16/24 00:42, Nikolai Barybin wrote:
The usage of HMP commands are highly discouraged by qemu. Moreover, current snapshot creation routine does not provide flexibility in choosing target device for VM state snapshot.
This patch makes use of QMP commands snapshot-save/delete and by default chooses first writable disk (if present) as target for VM state, NVRAM - otherwise.
Signed-off-by: Nikolai Barybin <nikolai.barybin@virtuozzo.com> --- src/qemu/qemu_snapshot.c | 158 ++++++++++++++++++++++++++++++++++++--- 1 file changed, 148 insertions(+), 10 deletions(-)
diff --git a/src/qemu/qemu_snapshot.c b/src/qemu/qemu_snapshot.c
index f5260c4a22..83949a9a27 100644
--- a/src/qemu/qemu_snapshot.c
+++ b/src/qemu/qemu_snapshot.c
@@ -308,6 +308,96 @@ qemuSnapshotCreateInactiveExternal(virQEMUDriver *driver,
     return ret;
 }
 
+static int
+qemuSnapshotActiveInternalGetWrdevListHelper(virDomainObj *vm,
+                                             char **wrdevs)
+{
+    size_t wrdevCount = 0;
+    size_t i = 0;
+
+    for (i = 0; i < vm->def->ndisks; i++) {
+        virDomainDiskDef *disk = vm->def->disks[i];
+        if (!disk->src->readonly) {
+            wrdevs[wrdevCount] = g_strdup(disk->src->nodenameformat);
+            wrdevCount++;
+        }
+    }
+
+    if (wrdevCount == 0) {
+        if (vm->def->os.loader->nvram) {
+            wrdevs[0] = g_strdup(vm->def->os.loader->nvram->nodenameformat);
+        } else {
+            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+                           _("no writable device for internal snapshot creation/deletion"));
+            return -1;
+        }
+    }
+
Should we select NVRAM at all? IMHO that would be very problematic. I think that we should generate an error in this case.

Den

On Tue, Jul 16, 2024 at 10:21:12 +0200, Denis V. Lunev via Devel wrote:
On 7/16/24 00:42, Nikolai Barybin wrote:
The usage of HMP commands are highly discouraged by qemu. Moreover, current snapshot creation routine does not provide flexibility in choosing target device for VM state snapshot.
This patch makes use of QMP commands snapshot-save/delete and by default chooses first writable disk (if present) as target for VM state, NVRAM - otherwise.
Signed-off-by: Nikolai Barybin <nikolai.barybin@virtuozzo.com> --- src/qemu/qemu_snapshot.c | 158 ++++++++++++++++++++++++++++++++++++--- 1 file changed, 148 insertions(+), 10 deletions(-)
[...]

+    if (wrdevCount == 0) {
+        if (vm->def->os.loader->nvram) {
+            wrdevs[0] = g_strdup(vm->def->os.loader->nvram->nodenameformat);
+        } else {
+            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+                           _("no writable device for internal snapshot creation/deletion"));
+            return -1;
+        }
+    }
+
should we select NVRAM at all?
IMHO that would be very problematic. I think that we should generate error in this case.
No. The NVRAM image must *never* be selected as the target for the VM state. Libvirt currently forbids internal snapshots with UEFI precisely because the NVRAM image could have been selected historically and we do not want that ever happening.

On Tue, Jul 16, 2024 at 10:32:56AM +0200, Peter Krempa wrote:
On Tue, Jul 16, 2024 at 10:21:12 +0200, Denis V. Lunev via Devel wrote:
On 7/16/24 00:42, Nikolai Barybin wrote:
The usage of HMP commands are highly discouraged by qemu. Moreover, current snapshot creation routine does not provide flexibility in choosing target device for VM state snapshot.
This patch makes use of QMP commands snapshot-save/delete and by default chooses first writable disk (if present) as target for VM state, NVRAM - otherwise.
Signed-off-by: Nikolai Barybin <nikolai.barybin@virtuozzo.com> --- src/qemu/qemu_snapshot.c | 158 ++++++++++++++++++++++++++++++++++++--- 1 file changed, 148 insertions(+), 10 deletions(-)
[...]

+    if (wrdevCount == 0) {
+        if (vm->def->os.loader->nvram) {
+            wrdevs[0] = g_strdup(vm->def->os.loader->nvram->nodenameformat);
+        } else {
+            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+                           _("no writable device for internal snapshot creation/deletion"));
+            return -1;
+        }
+    }
+
should we select NVRAM at all?
IMHO that would be very problematic. I think that we should generate error in this case.
No NVRAM image must *never* be selected as target for the VM state. Libvirt curretnly forbids internal snapshots with UEFI precisely because NVRAM image could have been selected historically and we do not want that ever happening.
If we DO have a writable disk though, and the NVRAM is in qcow2 format, then we should be including NVRAM in the snapshot operation, but the state snapshot should be stored in the primary disk, not the NVRAM.

With regards,
Daniel
-- 
|: https://berrange.com      -o-    https://www.flickr.com/photos/dberrange :|
|: https://libvirt.org         -o-            https://fstop138.berrange.com :|
|: https://entangle-photo.org    -o-    https://www.instagram.com/dberrange :|
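A rough sketch of that variant on top of the helper from this patch, assuming the NVRAM's virStorageSource carries its format the same way disk sources do (this is not part of the posted series):

    /* Keep the VM state on the first writable disk (wrdevs[0]), but also
     * append a qcow2-formatted NVRAM image to the device list so it is
     * snapshotted together with the disks. The wrdevs array would need
     * to be sized ndisks + 2 to leave room for this extra entry. */
    if (wrdevCount > 0 &&
        vm->def->os.loader &&
        vm->def->os.loader->nvram &&
        vm->def->os.loader->nvram->format == VIR_STORAGE_FILE_QCOW2) {
        wrdevs[wrdevCount] = g_strdup(vm->def->os.loader->nvram->nodenameformat);
        wrdevCount++;
    }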

On 7/16/24 00:42, Nikolai Barybin wrote:
The usage of HMP commands are highly discouraged by qemu. Moreover, current snapshot creation routine does not provide flexibility in choosing target device for VM state snapshot.
This patch makes use of QMP commands snapshot-save/delete and by default chooses first writable disk (if present) as target for VM state, NVRAM - otherwise.
Signed-off-by: Nikolai Barybin <nikolai.barybin@virtuozzo.com> --- src/qemu/qemu_snapshot.c | 158 ++++++++++++++++++++++++++++++++++++--- 1 file changed, 148 insertions(+), 10 deletions(-)
[...]
     if (!qemuMigrationSrcIsAllowed(vm, false, VIR_ASYNC_JOB_SNAPSHOT, 0))
         goto cleanup;
@@ -342,15 +432,17 @@ qemuSnapshotCreateActiveInternal(virQEMUDriver *driver,
         }
     }
 
-    if (qemuDomainObjEnterMonitorAsync(vm, VIR_ASYNC_JOB_SNAPSHOT) < 0) {
+    if ((ret = qemuSnapshotCreateActiveInternalStart(vm, snap->def->name)) < 0) {
         resume = false;
         goto cleanup;
     }
 
-    ret = qemuMonitorCreateSnapshot(priv->mon, snap->def->name);
-    qemuDomainObjExitMonitor(vm);
-    if (ret < 0)
-        goto cleanup;
'snapshot-save' has been added in QEMU 6.0, and right now we are at 8.2. Should we still support pre-6.0 versions, i.e. check the availability of the command via a proper capability?

Den

On Tue, Jul 16, 2024 at 10:27:29 +0200, Denis V. Lunev via Devel wrote:
On 7/16/24 00:42, Nikolai Barybin wrote:
The usage of HMP commands are highly discouraged by qemu. Moreover, current snapshot creation routine does not provide flexibility in choosing target device for VM state snapshot.
This patch makes use of QMP commands snapshot-save/delete and by default chooses first writable disk (if present) as target for VM state, NVRAM - otherwise.
Signed-off-by: Nikolai Barybin <nikolai.barybin@virtuozzo.com> --- src/qemu/qemu_snapshot.c | 158 ++++++++++++++++++++++++++++++++++++--- 1 file changed, 148 insertions(+), 10 deletions(-)
[...]
@@ -342,15 +432,17 @@ qemuSnapshotCreateActiveInternal(virQEMUDriver *driver,
         }
     }
 
-    if (qemuDomainObjEnterMonitorAsync(vm, VIR_ASYNC_JOB_SNAPSHOT) < 0) {
+    if ((ret = qemuSnapshotCreateActiveInternalStart(vm, snap->def->name)) < 0) {
         resume = false;
         goto cleanup;
     }
 
-    ret = qemuMonitorCreateSnapshot(priv->mon, snap->def->name);
-    qemuDomainObjExitMonitor(vm);
-    if (ret < 0)
-        goto cleanup;
'snapshot-save' has been added in QEMU 6.0. Right now we are at 8.2
Should we still support pre 6.0 versions, i.e. check the availability of the command via proper capability?
Libvirt currently supports qemu-5.2 as the oldest version, and since that does not yet support the new commands we will need to use capabilities and retain the old code. This also means that patch 4/4 must be dropped, as the code it removes is unfortunately still required for that one release.

qemu-5.2 seems to be currently the version in Debian 11 and openSUSE Leap 15.3 per commit 073bf167843ca2e788cbc581781c729ff26c80f5, which bumped it recently.
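That would mean gating the new code path on a capability and keeping the HMP helpers as a fallback, roughly as below; QEMU_CAPS_SNAPSHOT_SAVE is only a placeholder for whatever flag ends up being introduced:

    qemuDomainObjPrivate *priv = vm->privateData;

    if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_SNAPSHOT_SAVE)) {
        /* qemu >= 6.0: job-based QMP snapshot-save path from this series */
        ret = qemuSnapshotCreateActiveInternalStart(vm, snap->def->name);
    } else {
        /* qemu 5.2: retain the existing HMP 'savevm' code path */
        if (qemuDomainObjEnterMonitorAsync(vm, VIR_ASYNC_JOB_SNAPSHOT) < 0)
            goto cleanup;
        ret = qemuMonitorCreateSnapshot(priv->mon, snap->def->name);
        qemuDomainObjExitMonitor(vm);
    }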

On Tue, Jul 16, 2024 at 01:42:27AM +0300, Nikolai Barybin via Devel wrote:
The usage of HMP commands are highly discouraged by qemu. Moreover, current snapshot creation routine does not provide flexibility in choosing target device for VM state snapshot.
This patch makes use of QMP commands snapshot-save/delete and by default chooses first writable disk (if present) as target for VM state, NVRAM - otherwise.
Signed-off-by: Nikolai Barybin <nikolai.barybin@virtuozzo.com> --- src/qemu/qemu_snapshot.c | 158 ++++++++++++++++++++++++++++++++++++--- 1 file changed, 148 insertions(+), 10 deletions(-)
diff --git a/src/qemu/qemu_snapshot.c b/src/qemu/qemu_snapshot.c
index f5260c4a22..83949a9a27 100644
--- a/src/qemu/qemu_snapshot.c
+++ b/src/qemu/qemu_snapshot.c
@@ -308,6 +308,96 @@ qemuSnapshotCreateInactiveExternal(virQEMUDriver *driver,
     return ret;
 }
 
+static int
+qemuSnapshotActiveInternalGetWrdevListHelper(virDomainObj *vm,
+                                             char **wrdevs)
+{
+    size_t wrdevCount = 0;
+    size_t i = 0;
+
+    for (i = 0; i < vm->def->ndisks; i++) {
+        virDomainDiskDef *disk = vm->def->disks[i];
+        if (!disk->src->readonly) {
+            wrdevs[wrdevCount] = g_strdup(disk->src->nodenameformat);
+            wrdevCount++;
+        }
+    }
Being !readonly is not a sufficient criterion for allowing use of internal snapshots. It also must be !shareable. It also must be of a format that permits snapshots, i.e. effectively only qcow2.

We should likely deny snapshots entirely if there are any shareable disks, since I can't see it being conceptually sensible to snapshot a VM while excluding shared writable disks. Likewise deny snapshots if any writable disk is non-qcow2, as snapshots need to be consistent across the entire VM writable storage set.
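A rough sketch of the stricter pre-check being asked for; the shared/format fields on virStorageSource are assumed to be usable here the same way they are elsewhere in the tree, and this is not part of the posted patch:

    static int
    qemuSnapshotCheckWritableDisks(virDomainObj *vm)
    {
        size_t i;

        for (i = 0; i < vm->def->ndisks; i++) {
            virStorageSource *src = vm->def->disks[i]->src;

            if (src->readonly)
                continue;

            /* refuse the whole operation rather than silently skipping disks */
            if (src->shared) {
                virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
                               _("internal snapshots are not possible with shareable disks"));
                return -1;
            }

            if (src->format != VIR_STORAGE_FILE_QCOW2) {
                virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
                               _("internal snapshots require all writable disks to be qcow2"));
                return -1;
            }
        }

        return 0;
    }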
+
+    if (wrdevCount == 0) {
+        if (vm->def->os.loader->nvram) {
+            wrdevs[0] = g_strdup(vm->def->os.loader->nvram->nodenameformat);
This doesn't make sense IMHO. If there are no writable disks, we shouldn't be trying to snapshot at all.
+        } else {
+            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+                           _("no writable device for internal snapshot creation/deletion"));
+            return -1;
+        }
+    }
+
+    return 0;
+}

[...]

+static int
+qemuSnapshotCreateActiveInternalStart(virDomainObj *vm,
+                                      const char *name)
+{
+    qemuBlockJobData *job = NULL;
+    g_autofree char** wrdevs = NULL;
+    int ret = -1;
+    int rc = 0;
+
+    wrdevs = g_new0(char *, vm->def->ndisks + 1);
IMHO the qemuSnapshotActiveInternalGetWrdevListHelper method should be allocating this, to the correct size it needs.
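I.e. roughly the following shape, which also drops the NVRAM fallback as requested earlier in the thread (a sketch only, keeping the names from the patch):

    static int
    qemuSnapshotActiveInternalGetWrdevListHelper(virDomainObj *vm,
                                                 char ***wrdevs)
    {
        g_auto(GStrv) devs = NULL;
        size_t ndevs = 0;
        size_t i;

        /* at most one node name per disk, plus the NULL terminator */
        devs = g_new0(char *, vm->def->ndisks + 1);

        for (i = 0; i < vm->def->ndisks; i++) {
            virDomainDiskDef *disk = vm->def->disks[i];

            if (!disk->src->readonly)
                devs[ndevs++] = g_strdup(disk->src->nodenameformat);
        }

        if (ndevs == 0) {
            virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
                           _("no writable device for internal snapshot creation/deletion"));
            return -1;
        }

        *wrdevs = g_steal_pointer(&devs);
        return 0;
    }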
[...]
With regards,
Daniel
-- 
|: https://berrange.com      -o-    https://www.flickr.com/photos/dberrange :|
|: https://libvirt.org         -o-            https://fstop138.berrange.com :|
|: https://entangle-photo.org    -o-    https://www.instagram.com/dberrange :|

Signed-off-by: Nikolai Barybin <nikolai.barybin@virtuozzo.com>
---
 po/POTFILES                  |  1 -
 po/libvirt.pot               | 18 --------
 src/qemu/meson.build         |  1 -
 src/qemu/qemu_monitor.c      | 25 ----------
 src/qemu/qemu_monitor.h      |  3 --
 src/qemu/qemu_monitor_text.c | 88 ------------------------------------
 src/qemu/qemu_monitor_text.h | 29 ------------
 7 files changed, 165 deletions(-)
 delete mode 100644 src/qemu/qemu_monitor_text.c
 delete mode 100644 src/qemu/qemu_monitor_text.h

diff --git a/po/POTFILES b/po/POTFILES
index 1ed4086d2c..28773c6d78 100644
--- a/po/POTFILES
+++ b/po/POTFILES
@@ -185,7 +185,6 @@ src/qemu/qemu_migration_cookie.c
 src/qemu/qemu_migration_params.c
 src/qemu/qemu_monitor.c
 src/qemu/qemu_monitor_json.c
-src/qemu/qemu_monitor_text.c
 src/qemu/qemu_namespace.c
 src/qemu/qemu_nbdkit.c
 src/qemu/qemu_passt.c
diff --git a/po/libvirt.pot b/po/libvirt.pot
index 30f9344015..15c0871352 100644
--- a/po/libvirt.pot
+++ b/po/libvirt.pot
@@ -8794,11 +8794,6 @@ msgstr ""
 msgid "Failed to delete snapshot %1$s"
 msgstr ""
 
-#: src/qemu/qemu_monitor_text.c:83
-#, c-format
-msgid "Failed to delete snapshot: %1$s"
-msgstr ""
-
 #: src/bhyve/bhyve_driver.c:389 src/libxl/libxl_driver.c:4670
 #: src/lxc/lxc_driver.c:2495 src/network/bridge_driver.c:3618
 #: src/qemu/qemu_driver.c:7793 src/storage/storage_driver.c:1399
@@ -10640,11 +10635,6 @@ msgstr ""
 msgid "Failed to symlink device %1$s to %2$s"
 msgstr ""
 
-#: src/qemu/qemu_monitor_text.c:52
-#, c-format
-msgid "Failed to take snapshot: %1$s"
-msgstr ""
-
 #: src/util/virprocess.c:424 src/util/virprocess.c:435
 #, c-format
 msgid "Failed to terminate process %1$lld with SIG%2$s"
 msgstr ""
@@ -44930,14 +44920,6 @@ msgstr ""
 msgid "this disk doesn't support update"
 msgstr ""
 
-#: src/qemu/qemu_monitor_text.c:74
-msgid "this domain does not have a device to delete snapshots"
-msgstr ""
-
-#: src/qemu/qemu_monitor_text.c:56
-msgid "this domain does not have a device to take snapshots"
-msgstr ""
-
 #: src/util/virerror.c:1042
 msgid "this domain exists already"
 msgstr ""
diff --git a/src/qemu/meson.build b/src/qemu/meson.build
index 57356451e4..ec0572fdeb 100644
--- a/src/qemu/meson.build
+++ b/src/qemu/meson.build
@@ -27,7 +27,6 @@ qemu_driver_sources = [
   'qemu_migration_params.c',
   'qemu_monitor.c',
   'qemu_monitor_json.c',
-  'qemu_monitor_text.c',
   'qemu_namespace.c',
   'qemu_nbdkit.c',
   'qemu_passt.c',
diff --git a/src/qemu/qemu_monitor.c b/src/qemu/qemu_monitor.c
index 53f5ecf223..ccd937361c 100644
--- a/src/qemu/qemu_monitor.c
+++ b/src/qemu/qemu_monitor.c
@@ -28,7 +28,6 @@
 
 #include "qemu_alias.h"
 #include "qemu_monitor.h"
-#include "qemu_monitor_text.h"
 #include "qemu_monitor_json.h"
 #include "qemu_domain.h"
 #include "qemu_capabilities.h"
@@ -2739,30 +2738,6 @@ qemuMonitorDelObject(qemuMonitor *mon,
 }
 
 
-int
-qemuMonitorCreateSnapshot(qemuMonitor *mon, const char *name)
-{
-    VIR_DEBUG("name=%s", name);
-
-    QEMU_CHECK_MONITOR(mon);
-
-    /* there won't ever be a direct QMP replacement for this function */
-    return qemuMonitorTextCreateSnapshot(mon, name);
-}
-
-
-int
-qemuMonitorDeleteSnapshot(qemuMonitor *mon, const char *name)
-{
-    VIR_DEBUG("name=%s", name);
-
-    QEMU_CHECK_MONITOR(mon);
-
-    /* there won't ever be a direct QMP replacement for this function */
-    return qemuMonitorTextDeleteSnapshot(mon, name);
-}
-
-
 int
 qemuMonitorSnapshotSave(qemuMonitor *mon,
                         const char *jobname,
diff --git a/src/qemu/qemu_monitor.h b/src/qemu/qemu_monitor.h
index 27dbb78e06..7c9f014b61 100644
--- a/src/qemu/qemu_monitor.h
+++ b/src/qemu/qemu_monitor.h
@@ -960,9 +960,6 @@ int
 qemuMonitorDelObject(qemuMonitor *mon,
                      const char *objalias,
                      bool report_error);
-int qemuMonitorCreateSnapshot(qemuMonitor *mon, const char *name);
-int qemuMonitorDeleteSnapshot(qemuMonitor *mon, const char *name);
-
 int qemuMonitorTransaction(qemuMonitor *mon, virJSONValue **actions)
     ATTRIBUTE_NONNULL(2);
 
 int qemuMonitorBlockdevMirror(qemuMonitor *mon,
diff --git a/src/qemu/qemu_monitor_text.c b/src/qemu/qemu_monitor_text.c
deleted file mode 100644
index 3482972600..0000000000
--- a/src/qemu/qemu_monitor_text.c
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * qemu_monitor_text.c: interaction with QEMU monitor console
- *
- * Copyright (C) 2006-2014 Red Hat, Inc.
- * Copyright (C) 2006 Daniel P. Berrange
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library.  If not, see
- * <http://www.gnu.org/licenses/>.
- */
-
-#include <config.h>
-
-
-#include "qemu_monitor_text.h"
-#include "qemu_monitor_json.h"
-#include "virlog.h"
-#include "virerror.h"
-
-#define VIR_FROM_THIS VIR_FROM_QEMU
-
-VIR_LOG_INIT("qemu.qemu_monitor_text");
-
-int
-qemuMonitorTextCreateSnapshot(qemuMonitor *mon,
-                              const char *name)
-{
-    g_autofree char *cmd = NULL;
-    g_autofree char *reply = NULL;
-
-    cmd = g_strdup_printf("savevm \"%s\"", name);
-
-    if (qemuMonitorJSONHumanCommand(mon, cmd, -1, &reply))
-        return -1;
-
-    if (strstr(reply, "Error while creating snapshot") ||
-        strstr(reply, "Could not open VM state file") ||
-        strstr(reply, "State blocked by non-migratable device") ||
-        strstr(reply, "Error: ") ||
-        (strstr(reply, "Error") && strstr(reply, "while writing VM"))) {
-        virReportError(VIR_ERR_OPERATION_FAILED,
-                       _("Failed to take snapshot: %1$s"), reply);
-        return -1;
-    } else if (strstr(reply, "No block device can accept snapshots")) {
-        virReportError(VIR_ERR_OPERATION_INVALID, "%s",
-                       _("this domain does not have a device to take snapshots"));
-        return -1;
-    }
-
-    return 0;
-}
-
-int qemuMonitorTextDeleteSnapshot(qemuMonitor *mon, const char *name)
-{
-    g_autofree char *cmd = NULL;
-    g_autofree char *reply = NULL;
-
-    cmd = g_strdup_printf("delvm \"%s\"", name);
-    if (qemuMonitorJSONHumanCommand(mon, cmd, -1, &reply))
-        return -1;
-
-    if (strstr(reply, "No block device supports snapshots")) {
-        virReportError(VIR_ERR_OPERATION_INVALID, "%s",
-                       _("this domain does not have a device to delete snapshots"));
-        return -1;
-    } else if (strstr(reply, "Snapshots not supported on device")) {
-        virReportError(VIR_ERR_OPERATION_INVALID, "%s", reply);
-        return -1;
-    } else if (strstr(reply, "Error: ") ||
-               (strstr(reply, "Error") &&
-                strstr(reply, "while deleting snapshot"))) {
-        virReportError(VIR_ERR_OPERATION_FAILED,
-                       _("Failed to delete snapshot: %1$s"), reply);
-        return -1;
-    }
-
-    return 0;
-}
diff --git a/src/qemu/qemu_monitor_text.h b/src/qemu/qemu_monitor_text.h
deleted file mode 100644
index 27d0f061d3..0000000000
--- a/src/qemu/qemu_monitor_text.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * qemu_monitor_text.h: interaction with QEMU monitor console
- *
- * Copyright (C) 2006-2009, 2011-2012 Red Hat, Inc.
- * Copyright (C) 2006 Daniel P. Berrange
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library.  If not, see
- * <http://www.gnu.org/licenses/>.
- */
-
-#pragma once
-
-#include "internal.h"
-
-#include "qemu_monitor.h"
-
-int qemuMonitorTextCreateSnapshot(qemuMonitor *mon, const char *name);
-int qemuMonitorTextDeleteSnapshot(qemuMonitor *mon, const char *name);
-- 
2.43.5

Hello everyone! This is a ping email.

On 7/16/24 00:42, Nikolai Barybin wrote:
These patches make use of QMP recently added snapshot-save/delete commands and reaps HMP savevm/deletevm. The usage of HMP commands are highly discouraged by QEMU.
Nikolai Barybin (4):
  qemu monitor: add snapshot-save/delete QMP commands
  qemu blockjob: add snapshot-save/delete job types
  qemu snapshot: use QMP snapshot-save/delete for internal snapshots
  qemu monitor: reap qemu_monitor_text
 po/POTFILES                  |   1 -
 po/libvirt.pot               |  18 ----
 src/qemu/meson.build         |   1 -
 src/qemu/qemu_block.c        |   2 +
 src/qemu/qemu_blockjob.c     |   6 +-
 src/qemu/qemu_blockjob.h     |   2 +
 src/qemu/qemu_domain.c       |   4 +
 src/qemu/qemu_monitor.c      |  23 +++--
 src/qemu/qemu_monitor.h      |  16 +++-
 src/qemu/qemu_monitor_json.c |  66 +++++++++++++++
 src/qemu/qemu_monitor_json.h |  13 +++
 src/qemu/qemu_monitor_text.c |  88 ------------------
 src/qemu/qemu_monitor_text.h |  29 -------
 src/qemu/qemu_snapshot.c     | 158 ++++++++++++++++++++++++++++++++---
 14 files changed, 267 insertions(+), 160 deletions(-)
 delete mode 100644 src/qemu/qemu_monitor_text.c
 delete mode 100644 src/qemu/qemu_monitor_text.h

On 7/26/24 12:35, Nikolai Barybin wrote:
> Hello everyone! This is a ping email
>
> On 7/16/24 00:42, Nikolai Barybin wrote:
>> [...]

1. Please do not top-post.
2. I believe you are pinging the wrong thread; you should do that in v2.
Participants (4)
- Daniel P. Berrangé
- Denis V. Lunev
- Nikolai Barybin
- Peter Krempa