[libvirt PATCH 0/4] qemu: make nvram creation more robust and enable recovery

Daniel P. Berrangé (4): qemu: do crash safe creation of NVRAM file include: define constants for resetting NVRAM state qemu: wire up support for resetting NVRAM tools: add --reset-nvram arg to several virsh commands docs/manpages/virsh.rst | 21 ++++++++++++--- include/libvirt/libvirt-domain-snapshot.h | 1 + include/libvirt/libvirt-domain.h | 2 ++ src/libvirt-domain.c | 16 ++++++++++++ src/qemu/qemu_driver.c | 24 ++++++++++++----- src/qemu/qemu_process.c | 32 ++++++++++++++++++----- src/qemu/qemu_process.h | 1 + src/qemu/qemu_saveimage.c | 9 +++++-- src/qemu/qemu_saveimage.h | 1 + src/qemu/qemu_snapshot.c | 6 ++++- tools/virsh-domain.c | 18 +++++++++++++ tools/virsh-snapshot.c | 6 +++++ 12 files changed, 117 insertions(+), 20 deletions(-) -- 2.34.1

If we crash part way through writing the NVRAM file we end up with an unusable NVRAM on file. To avoid this we need to write to a temporary file and fsync(2) at the end, then rename to the real NVRAM file path. Signed-off-by: Daniel P. Berrangé <berrange@redhat.com> --- src/qemu/qemu_process.c | 24 ++++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c index c13280c8f3..bc7c2a4dbc 100644 --- a/src/qemu/qemu_process.c +++ b/src/qemu/qemu_process.c @@ -4421,6 +4421,7 @@ qemuPrepareNVRAM(virQEMUDriver *driver, bool created = false; const char *master_nvram_path; ssize_t r; + g_autofree char *tmp_dst_path = NULL; if (!loader || !loader->nvram || virFileExists(loader->nvram)) return 0; @@ -4451,14 +4452,15 @@ qemuPrepareNVRAM(virQEMUDriver *driver, goto cleanup; } - if ((dstFD = virFileOpenAs(loader->nvram, + tmp_dst_path = g_strdup_printf("%s.tmp", loader->nvram); + if ((dstFD = virFileOpenAs(tmp_dst_path, O_WRONLY | O_CREAT | O_EXCL, S_IRUSR | S_IWUSR, cfg->user, cfg->group, VIR_FILE_OPEN_FORCE_OWNER)) < 0) { virReportSystemError(-dstFD, _("Failed to create file '%s'"), - loader->nvram); + tmp_dst_path); goto cleanup; } @@ -4477,7 +4479,7 @@ qemuPrepareNVRAM(virQEMUDriver *driver, if (safewrite(dstFD, buf, r) < 0) { virReportSystemError(errno, _("Unable to write to file '%s'"), - loader->nvram); + tmp_dst_path); goto cleanup; } } while (r); @@ -4488,9 +4490,23 @@ qemuPrepareNVRAM(virQEMUDriver *driver, master_nvram_path); goto cleanup; } + + if (g_fsync(dstFD) < 0) { + virReportSystemError(errno, _("cannot sync file '%s'"), + tmp_dst_path); + goto cleanup; + } + if (VIR_CLOSE(dstFD) < 0) { virReportSystemError(errno, _("Unable to close file '%s'"), + tmp_dst_path); + goto cleanup; + } + + if (rename(tmp_dst_path, loader->nvram) < 0) { + virReportSystemError(errno, + _("Unable to replace '%s'"), loader->nvram); goto cleanup; } @@ -4501,7 +4517,7 @@ qemuPrepareNVRAM(virQEMUDriver *driver, 
* copy the file content. Roll back. */ if (ret < 0) { if (created) - unlink(loader->nvram); + unlink(tmp_dst_path); } VIR_FORCE_CLOSE(srcFD); -- 2.34.1

When starting a guest with pflash based firmware, we will initialize NVRAM from a template if it does not already exist. In theory if the firmware code file is updated, the existing NVRAM variables should continue to work correctly. It is inevitable that this could break accidentally one day. Or a bug in the firmware might corrupt the NVRAM storage. Or user might make bad changes to the settings that prevent booting. Or the user might have re-configured the XML to point to a different firmware file incompatible with the current variables. In all these cases it would be useful to delete the existing NVRAM and initialize it from the pristine template. To support this introduce a VIR_DOMAIN_START_RESET_NVRAM constant for use with virDomainCreate / virDomainCreateXML, along with a VIR_DOMAIN_SAVE_RESET_NVRAM constant for use with virDomainRestore. Signed-off-by: Daniel P. Berrangé <berrange@redhat.com> --- include/libvirt/libvirt-domain-snapshot.h | 1 + include/libvirt/libvirt-domain.h | 2 ++ src/libvirt-domain.c | 16 ++++++++++++++++ 3 files changed, 19 insertions(+) diff --git a/include/libvirt/libvirt-domain-snapshot.h b/include/libvirt/libvirt-domain-snapshot.h index 90673ed0fb..d729d1a532 100644 --- a/include/libvirt/libvirt-domain-snapshot.h +++ b/include/libvirt/libvirt-domain-snapshot.h @@ -198,6 +198,7 @@ typedef enum { VIR_DOMAIN_SNAPSHOT_REVERT_RUNNING = 1 << 0, /* Run after revert */ VIR_DOMAIN_SNAPSHOT_REVERT_PAUSED = 1 << 1, /* Pause after revert */ VIR_DOMAIN_SNAPSHOT_REVERT_FORCE = 1 << 2, /* Allow risky reverts */ + VIR_DOMAIN_SNAPSHOT_REVERT_RESET_NVRAM = 1 << 3, /* Re-initialize NVRAM from template */ } virDomainSnapshotRevertFlags; /* Revert the domain to a point-in-time snapshot. 
The diff --git a/include/libvirt/libvirt-domain.h b/include/libvirt/libvirt-domain.h index 374859fdff..8c16598817 100644 --- a/include/libvirt/libvirt-domain.h +++ b/include/libvirt/libvirt-domain.h @@ -302,6 +302,7 @@ typedef enum { VIR_DOMAIN_START_BYPASS_CACHE = 1 << 2, /* Avoid file system cache pollution */ VIR_DOMAIN_START_FORCE_BOOT = 1 << 3, /* Boot, discarding any managed save */ VIR_DOMAIN_START_VALIDATE = 1 << 4, /* Validate the XML document against schema */ + VIR_DOMAIN_START_RESET_NVRAM = 1 << 5, /* Re-initialize NVRAM from template */ } virDomainCreateFlags; @@ -1268,6 +1269,7 @@ typedef enum { VIR_DOMAIN_SAVE_BYPASS_CACHE = 1 << 0, /* Avoid file system cache pollution */ VIR_DOMAIN_SAVE_RUNNING = 1 << 1, /* Favor running over paused */ VIR_DOMAIN_SAVE_PAUSED = 1 << 2, /* Favor paused over running */ + VIR_DOMAIN_SAVE_RESET_NVRAM = 1 << 3, /* Re-initialize NVRAM from template */ } virDomainSaveRestoreFlags; int virDomainSave (virDomainPtr domain, diff --git a/src/libvirt-domain.c b/src/libvirt-domain.c index 5912551a49..75796affc7 100644 --- a/src/libvirt-domain.c +++ b/src/libvirt-domain.c @@ -154,6 +154,10 @@ virDomainGetConnect(virDomainPtr dom) * block attempts at migration. Hypervisors may also block save-to-file, * or snapshots. * + * If @flags includes VIR_DOMAIN_START_RESET_NVRAM, then libvirt will + * discard any existing NVRAM file and re-initialize NVRAM from the + * pristine template. + * * virDomainFree should be used to free the resources after the * domain object is no longer needed. * @@ -1015,6 +1019,10 @@ virDomainRestore(virConnectPtr conn, const char *from) * @flags will override the default read from the file. These two * flags are mutually exclusive. * + * If @flags includes VIR_DOMAIN_SAVE_RESET_NVRAM, then libvirt will + * discard any existing NVRAM file and re-initialize NVRAM from the + * pristine template. + * * Returns 0 in case of success and -1 in case of failure. 
*/ int @@ -6764,6 +6772,10 @@ virDomainCreate(virDomainPtr domain) * If the VIR_DOMAIN_START_FORCE_BOOT flag is set, then any managed save * file for this domain is discarded, and the domain boots from scratch. * + * If @flags includes VIR_DOMAIN_START_RESET_NVRAM, then libvirt will + * discard any existing NVRAM file and re-initialize NVRAM from the + * pristine template. + * * Returns 0 in case of success, -1 in case of error */ int @@ -6836,6 +6848,10 @@ virDomainCreateWithFlags(virDomainPtr domain, unsigned int flags) * If the VIR_DOMAIN_START_FORCE_BOOT flag is set, then any managed save * file for this domain is discarded, and the domain boots from scratch. * + * If @flags includes VIR_DOMAIN_START_RESET_NVRAM, then libvirt will + * discard any existing NVRAM file and re-initialize NVRAM from the + * pristine template. + * * Returns 0 in case of success, -1 in case of error */ int -- 2.34.1

We can now replace the existing NVRAM file on startup when the API requests this. Signed-off-by: Daniel P. Berrangé <berrange@redhat.com> --- src/qemu/qemu_driver.c | 24 ++++++++++++++++++------ src/qemu/qemu_process.c | 8 +++++--- src/qemu/qemu_process.h | 1 + src/qemu/qemu_saveimage.c | 9 +++++++-- src/qemu/qemu_saveimage.h | 1 + src/qemu/qemu_snapshot.c | 6 +++++- 6 files changed, 37 insertions(+), 12 deletions(-) diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c index 698f57f00e..4831a81a78 100644 --- a/src/qemu/qemu_driver.c +++ b/src/qemu/qemu_driver.c @@ -1589,7 +1589,8 @@ static virDomainPtr qemuDomainCreateXML(virConnectPtr conn, virCheckFlags(VIR_DOMAIN_START_PAUSED | VIR_DOMAIN_START_AUTODESTROY | - VIR_DOMAIN_START_VALIDATE, NULL); + VIR_DOMAIN_START_VALIDATE | + VIR_DOMAIN_START_RESET_NVRAM, NULL); if (flags & VIR_DOMAIN_START_VALIDATE) parse_flags |= VIR_DOMAIN_DEF_PARSE_VALIDATE_SCHEMA; @@ -1597,6 +1598,8 @@ static virDomainPtr qemuDomainCreateXML(virConnectPtr conn, start_flags |= VIR_QEMU_PROCESS_START_PAUSED; if (flags & VIR_DOMAIN_START_AUTODESTROY) start_flags |= VIR_QEMU_PROCESS_START_AUTODESTROY; + if (flags & VIR_DOMAIN_START_RESET_NVRAM) + start_flags |= VIR_QEMU_PROCESS_START_RESET_NVRAM; virNWFilterReadLockFilterUpdates(); @@ -5754,11 +5757,15 @@ qemuDomainRestoreFlags(virConnectPtr conn, virQEMUSaveData *data = NULL; virFileWrapperFd *wrapperFd = NULL; bool hook_taint = false; + bool reset_nvram = false; virCheckFlags(VIR_DOMAIN_SAVE_BYPASS_CACHE | VIR_DOMAIN_SAVE_RUNNING | - VIR_DOMAIN_SAVE_PAUSED, -1); + VIR_DOMAIN_SAVE_PAUSED | + VIR_DOMAIN_SAVE_RESET_NVRAM, -1); + if (flags & VIR_DOMAIN_SAVE_RESET_NVRAM) + reset_nvram = true; virNWFilterReadLockFilterUpdates(); @@ -5820,7 +5827,7 @@ qemuDomainRestoreFlags(virConnectPtr conn, goto cleanup; ret = qemuSaveImageStartVM(conn, driver, vm, &fd, data, path, - false, QEMU_ASYNC_JOB_START); + false, reset_nvram, QEMU_ASYNC_JOB_START); qemuProcessEndJob(driver, vm); @@ -6029,6 +6036,7 
@@ qemuDomainObjRestore(virConnectPtr conn, const char *path, bool start_paused, bool bypass_cache, + bool reset_nvram, qemuDomainAsyncJob asyncJob) { g_autoptr(virDomainDef) def = NULL; @@ -6087,7 +6095,7 @@ qemuDomainObjRestore(virConnectPtr conn, virDomainObjAssignDef(vm, &def, true, NULL); ret = qemuSaveImageStartVM(conn, driver, vm, &fd, data, path, - start_paused, asyncJob); + start_paused, reset_nvram, asyncJob); cleanup: virQEMUSaveDataFree(data); @@ -6299,11 +6307,13 @@ qemuDomainObjStart(virConnectPtr conn, bool autodestroy = (flags & VIR_DOMAIN_START_AUTODESTROY) != 0; bool bypass_cache = (flags & VIR_DOMAIN_START_BYPASS_CACHE) != 0; bool force_boot = (flags & VIR_DOMAIN_START_FORCE_BOOT) != 0; + bool reset_nvram = (flags & VIR_DOMAIN_START_RESET_NVRAM) != 0; unsigned int start_flags = VIR_QEMU_PROCESS_START_COLD; qemuDomainObjPrivate *priv = vm->privateData; start_flags |= start_paused ? VIR_QEMU_PROCESS_START_PAUSED : 0; start_flags |= autodestroy ? VIR_QEMU_PROCESS_START_AUTODESTROY : 0; + start_flags |= reset_nvram ? 
VIR_QEMU_PROCESS_START_RESET_NVRAM : 0; /* * If there is a managed saved state restore it instead of starting @@ -6328,7 +6338,8 @@ qemuDomainObjStart(virConnectPtr conn, priv->job.current->operation = VIR_DOMAIN_JOB_OPERATION_RESTORE; ret = qemuDomainObjRestore(conn, driver, vm, managed_save, - start_paused, bypass_cache, asyncJob); + start_paused, bypass_cache, + reset_nvram, asyncJob); if (ret == 0) { if (unlink(managed_save) < 0) @@ -6380,7 +6391,8 @@ qemuDomainCreateWithFlags(virDomainPtr dom, unsigned int flags) virCheckFlags(VIR_DOMAIN_START_PAUSED | VIR_DOMAIN_START_AUTODESTROY | VIR_DOMAIN_START_BYPASS_CACHE | - VIR_DOMAIN_START_FORCE_BOOT, -1); + VIR_DOMAIN_START_FORCE_BOOT | + VIR_DOMAIN_START_RESET_NVRAM, -1); virNWFilterReadLockFilterUpdates(); diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c index bc7c2a4dbc..659fba8672 100644 --- a/src/qemu/qemu_process.c +++ b/src/qemu/qemu_process.c @@ -4411,7 +4411,8 @@ qemuProcessUpdateCPU(virQEMUDriver *driver, static int qemuPrepareNVRAM(virQEMUDriver *driver, - virDomainObj *vm) + virDomainObj *vm, + bool reset_nvram) { g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver); int ret = -1; @@ -4423,7 +4424,8 @@ qemuPrepareNVRAM(virQEMUDriver *driver, ssize_t r; g_autofree char *tmp_dst_path = NULL; - if (!loader || !loader->nvram || virFileExists(loader->nvram)) + if (!loader || !loader->nvram || + (virFileExists(loader->nvram) && !reset_nvram)) return 0; master_nvram_path = loader->templt; @@ -6973,7 +6975,7 @@ qemuProcessPrepareHost(virQEMUDriver *driver, qemuProcessMakeDir(driver, vm, priv->channelTargetDir) < 0) return -1; - if (qemuPrepareNVRAM(driver, vm) < 0) + if (qemuPrepareNVRAM(driver, vm, flags & VIR_QEMU_PROCESS_START_RESET_NVRAM) < 0) return -1; if (vm->def->vsock) { diff --git a/src/qemu/qemu_process.h b/src/qemu/qemu_process.h index 1b1cc489f0..f6c0d63d11 100644 --- a/src/qemu/qemu_process.h +++ b/src/qemu/qemu_process.h @@ -79,6 +79,7 @@ typedef enum { 
VIR_QEMU_PROCESS_START_PRETEND = 1 << 3, VIR_QEMU_PROCESS_START_NEW = 1 << 4, /* internal, new VM is starting */ VIR_QEMU_PROCESS_START_GEN_VMID = 1 << 5, /* Generate a new VMID */ + VIR_QEMU_PROCESS_START_RESET_NVRAM = 1 << 6, /* Re-initialize NVRAM from template */ } qemuProcessStartFlags; int qemuProcessStart(virConnectPtr conn, diff --git a/src/qemu/qemu_saveimage.c b/src/qemu/qemu_saveimage.c index 557ee2cd21..c0139041eb 100644 --- a/src/qemu/qemu_saveimage.c +++ b/src/qemu/qemu_saveimage.c @@ -577,6 +577,7 @@ qemuSaveImageStartVM(virConnectPtr conn, virQEMUSaveData *data, const char *path, bool start_paused, + bool reset_nvram, qemuDomainAsyncJob asyncJob) { qemuDomainObjPrivate *priv = vm->privateData; @@ -590,6 +591,11 @@ qemuSaveImageStartVM(virConnectPtr conn, virQEMUSaveHeader *header = &data->header; g_autoptr(qemuDomainSaveCookie) cookie = NULL; int rc = 0; + unsigned int start_flags = VIR_QEMU_PROCESS_START_PAUSED | + VIR_QEMU_PROCESS_START_GEN_VMID; + + if (reset_nvram) + start_flags |= VIR_QEMU_PROCESS_START_RESET_NVRAM; if (virSaveCookieParseString(data->cookie, (virObject **)&cookie, virDomainXMLOptionGetSaveCookie(driver->xmlopt)) < 0) @@ -628,8 +634,7 @@ qemuSaveImageStartVM(virConnectPtr conn, if (qemuProcessStart(conn, driver, vm, cookie ?
cookie->cpu : NULL, asyncJob, "stdio", *fd, path, NULL, VIR_NETDEV_VPORT_PROFILE_OP_RESTORE, - VIR_QEMU_PROCESS_START_PAUSED | - VIR_QEMU_PROCESS_START_GEN_VMID) == 0) + start_flags) == 0) started = true; if (intermediatefd != -1) { diff --git a/src/qemu/qemu_saveimage.h b/src/qemu/qemu_saveimage.h index 45c5f35e11..a0daa4ad2b 100644 --- a/src/qemu/qemu_saveimage.h +++ b/src/qemu/qemu_saveimage.h @@ -67,6 +67,7 @@ qemuSaveImageStartVM(virConnectPtr conn, virQEMUSaveData *data, const char *path, bool start_paused, + bool reset_nvram, qemuDomainAsyncJob asyncJob) ATTRIBUTE_NONNULL(4) ATTRIBUTE_NONNULL(5) ATTRIBUTE_NONNULL(6); diff --git a/src/qemu/qemu_snapshot.c b/src/qemu/qemu_snapshot.c index 1887c70708..a99f1246e0 100644 --- a/src/qemu/qemu_snapshot.c +++ b/src/qemu/qemu_snapshot.c @@ -2243,7 +2243,11 @@ qemuSnapshotRevert(virDomainObj *vm, virCheckFlags(VIR_DOMAIN_SNAPSHOT_REVERT_RUNNING | VIR_DOMAIN_SNAPSHOT_REVERT_PAUSED | - VIR_DOMAIN_SNAPSHOT_REVERT_FORCE, -1); + VIR_DOMAIN_SNAPSHOT_REVERT_FORCE | + VIR_DOMAIN_SNAPSHOT_REVERT_RESET_NVRAM, -1); + + if (flags & VIR_DOMAIN_SNAPSHOT_REVERT_RESET_NVRAM) + start_flags |= VIR_QEMU_PROCESS_START_RESET_NVRAM; /* We have the following transitions, which create the following events: * 1. inactive -> inactive: none -- 2.34.1

This wires up support for resetting NVRAM for all APIs that allow this feature. Signed-off-by: Daniel P. Berrangé <berrange@redhat.com> --- docs/manpages/virsh.rst | 21 +++++++++++++++++---- tools/virsh-domain.c | 18 ++++++++++++++++++ tools/virsh-snapshot.c | 6 ++++++ 3 files changed, 41 insertions(+), 4 deletions(-) diff --git a/docs/manpages/virsh.rst b/docs/manpages/virsh.rst index e28927ed6c..429879d2dd 100644 --- a/docs/manpages/virsh.rst +++ b/docs/manpages/virsh.rst @@ -1455,7 +1455,7 @@ create :: create FILE [--console] [--paused] [--autodestroy] - [--pass-fds N,M,...] [--validate] + [--pass-fds N,M,...] [--validate] [--reset-nvram] Create a domain from an XML <file>. Optionally, *--validate* option can be passed to validate the format of the input XML file against an internal RNG @@ -1478,6 +1478,9 @@ of open file descriptors which should be pass on into the guest. The file descriptors will be re-numbered in the guest, starting from 3. This is only supported with container based virtualization. +If *--reset-nvram* is specified, any existing NVRAM file will be deleted +and re-initialized from its pristine template. + **Example:** #. prepare a template from an existing domain (skip directly to 3a if writing @@ -3736,7 +3739,7 @@ restore :: restore state-file [--bypass-cache] [--xml file] - [{--running | --paused}] + [{--running | --paused}] [--reset-nvram] Restores a domain from a ``virsh save`` state file. See *save* for more info. @@ -3754,6 +3757,9 @@ save image to decide between running or paused; passing either the *--running* or *--paused* flag will allow overriding which state the domain should be started in. +If *--reset-nvram* is specified, any existing NVRAM file will be deleted +and re-initialized from its pristine template. 
+ ``Note``: To avoid corrupting file system contents within the domain, you should not reuse the saved state file for a second ``restore`` unless you have also reverted all storage volumes back to the same contents as when @@ -4350,7 +4356,7 @@ start start domain-name-or-uuid [--console] [--paused] [--autodestroy] [--bypass-cache] [--force-boot] - [--pass-fds N,M,...] + [--pass-fds N,M,...] [--reset-nvram] Start a (previously defined) inactive domain, either from the last ``managedsave`` state, or via a fresh boot if no managedsave state is @@ -4369,6 +4375,9 @@ of open file descriptors which should be pass on into the guest. The file descriptors will be re-numbered in the guest, starting from 3. This is only supported with container based virtualization. +If *--reset-nvram* is specified, any existing NVRAM file will be deleted +and re-initialized from its pristine template. + suspend ------- @@ -7352,7 +7361,8 @@ snapshot-revert :: - snapshot-revert domain {snapshot | --current} [{--running | --paused}] [--force] + snapshot-revert domain {snapshot | --current} [{--running | --paused}] + [--force] [--reset-nvram] Revert the given domain to the snapshot specified by *snapshot*, or to the current snapshot with *--current*. Be aware @@ -7398,6 +7408,9 @@ requires the use of *--force* to proceed: likely cause extensive filesystem corruption or crashes due to swap content mismatches when run. +If *--reset-nvram* is specified, any existing NVRAM file will be deleted +and re-initialized from its pristine template. + snapshot-delete --------------- diff --git a/tools/virsh-domain.c b/tools/virsh-domain.c index 43d310f2af..97986788d7 100644 --- a/tools/virsh-domain.c +++ b/tools/virsh-domain.c @@ -4010,6 +4010,10 @@ static const vshCmdOptDef opts_start[] = { .completer = virshCompleteEmpty, .help = N_("pass file descriptors N,M,... 
to the guest") }, + {.name = "reset-nvram", + .type = VSH_OT_BOOL, + .help = N_("re-initialize NVRAM from its pristine template") + }, {.name = NULL} }; @@ -4087,6 +4091,8 @@ cmdStart(vshControl *ctl, const vshCmd *cmd) flags |= VIR_DOMAIN_START_BYPASS_CACHE; if (vshCommandOptBool(cmd, "force-boot")) flags |= VIR_DOMAIN_START_FORCE_BOOT; + if (vshCommandOptBool(cmd, "reset-nvram")) + flags |= VIR_DOMAIN_START_RESET_NVRAM; /* We can emulate force boot, even for older servers that reject it. */ if (flags & VIR_DOMAIN_START_FORCE_BOOT) { @@ -5268,6 +5274,10 @@ static const vshCmdOptDef opts_restore[] = { .type = VSH_OT_BOOL, .help = N_("restore domain into paused state") }, + {.name = "reset-nvram", + .type = VSH_OT_BOOL, + .help = N_("re-initialize NVRAM from its pristine template") + }, {.name = NULL} }; @@ -5289,6 +5299,8 @@ cmdRestore(vshControl *ctl, const vshCmd *cmd) flags |= VIR_DOMAIN_SAVE_RUNNING; if (vshCommandOptBool(cmd, "paused")) flags |= VIR_DOMAIN_SAVE_PAUSED; + if (vshCommandOptBool(cmd, "reset-nvram")) + flags |= VIR_DOMAIN_SAVE_RESET_NVRAM; if (vshCommandOptStringReq(ctl, cmd, "xml", &xmlfile) < 0) return false; @@ -8093,6 +8105,10 @@ static const vshCmdOptDef opts_create[] = { .type = VSH_OT_BOOL, .help = N_("validate the XML against the schema") }, + {.name = "reset-nvram", + .type = VSH_OT_BOOL, + .help = N_("re-initialize NVRAM from its pristine template") + }, {.name = NULL} }; @@ -8125,6 +8141,8 @@ cmdCreate(vshControl *ctl, const vshCmd *cmd) flags |= VIR_DOMAIN_START_AUTODESTROY; if (vshCommandOptBool(cmd, "validate")) flags |= VIR_DOMAIN_START_VALIDATE; + if (vshCommandOptBool(cmd, "reset-nvram")) + flags |= VIR_DOMAIN_START_RESET_NVRAM; if (nfds) dom = virDomainCreateXMLWithFiles(priv->conn, buffer, nfds, fds, flags); diff --git a/tools/virsh-snapshot.c b/tools/virsh-snapshot.c index 154e82b48b..b86fa73ac2 100644 --- a/tools/virsh-snapshot.c +++ b/tools/virsh-snapshot.c @@ -1725,6 +1725,10 @@ static const vshCmdOptDef 
opts_snapshot_revert[] = { .type = VSH_OT_BOOL, .help = N_("try harder on risky reverts") }, + {.name = "reset-nvram", + .type = VSH_OT_BOOL, + .help = N_("re-initialize NVRAM from its pristine template") + }, {.name = NULL} }; @@ -1742,6 +1746,8 @@ cmdDomainSnapshotRevert(vshControl *ctl, const vshCmd *cmd) flags |= VIR_DOMAIN_SNAPSHOT_REVERT_RUNNING; if (vshCommandOptBool(cmd, "paused")) flags |= VIR_DOMAIN_SNAPSHOT_REVERT_PAUSED; + if (vshCommandOptBool(cmd, "reset-nvram")) + flags |= VIR_DOMAIN_SNAPSHOT_REVERT_RESET_NVRAM; /* We want virsh snapshot-revert --force to work even when talking * to older servers that did the unsafe revert by default but * reject the flag, so we probe without the flag, and only use it -- 2.34.1

On a Monday in 2022, Daniel P. Berrangé wrote:
Daniel P. Berrangé (4): qemu: do crash safe creation of NVRAM file include: define constants for resetting NVRAM state qemu: wire up support for resetting NVRAM tools: add --reset-nvram arg to several virsh commands
docs/manpages/virsh.rst | 21 ++++++++++++--- include/libvirt/libvirt-domain-snapshot.h | 1 + include/libvirt/libvirt-domain.h | 2 ++ src/libvirt-domain.c | 16 ++++++++++++ src/qemu/qemu_driver.c | 24 ++++++++++++----- src/qemu/qemu_process.c | 32 ++++++++++++++++++----- src/qemu/qemu_process.h | 1 + src/qemu/qemu_saveimage.c | 9 +++++-- src/qemu/qemu_saveimage.h | 1 + src/qemu/qemu_snapshot.c | 6 ++++- tools/virsh-domain.c | 18 +++++++++++++ tools/virsh-snapshot.c | 6 +++++ 12 files changed, 117 insertions(+), 20 deletions(-)
Reviewed-by: Ján Tomko <jtomko@redhat.com> Jano
participants (2)
-
Daniel P. Berrangé
-
Ján Tomko