[PATCH V4 00/18] qemu: support mapped-ram+directio+multifd

V4 series adding support for QEMU's mapped-ram stream format [1] and migration capability. The use of mapped-ram is controlled by extending save_image_format setting in qemu.conf with a new 'sparse' option. The 'raw' format continues to be the default. Also included are patches that leverage mapped-ram to add support for parallel save/restore. Changes in V4: * Rebased on latest master, including the series "qemu: Support specifying save image format" V1: https://lists.libvirt.org/archives/list/devel@lists.libvirt.org/message/MNBH... V2: https://lists.libvirt.org/archives/list/devel@lists.libvirt.org/message/GNJN... V3: https://lists.libvirt.org/archives/list/devel@lists.libvirt.org/message/SK2P... [1] https://gitlab.com/qemu-project/qemu/-/blob/master/docs/devel/migration/mapp... Claudio Fontana (3): include: Define constants for parallel save/restore tools: add parallel parameter to virsh save command tools: add parallel parameter to virsh restore command Jim Fehlig (15): lib: virDomain{Save,Restore}Params: Ensure absolute path qemu: Add function to get FDPass object from monitor qemu: Add function to check capability in migration params qemu: Add function to get bool value from migration params qemu: Add mapped-ram migration capability qemu: Add function to get migration params for save qemu_saveimage: add "sparse" to supported save image formats qemu: Add helper function for creating save image fd qemu: Add support for mapped-ram on save qemu: Move creation of qemuProcessIncomingDef struct qemu: Apply migration parameters in qemuMigrationDstRun qemu: Add support for mapped-ram on restore qemu: Support O_DIRECT with mapped-ram on save qemu: Support O_DIRECT with mapped-ram on restore qemu: Add support for parallel save and restore docs/manpages/virsh.rst | 21 +++- include/libvirt/libvirt-domain.h | 11 ++ src/libvirt-domain.c | 92 +++++++++++--- src/qemu/qemu.conf.in | 9 +- src/qemu/qemu_driver.c | 78 +++++++++--- src/qemu/qemu_fd.c | 46 +++++++ src/qemu/qemu_fd.h 
| 4 + src/qemu/qemu_migration.c | 204 ++++++++++++++++++++++--------- src/qemu/qemu_migration.h | 10 +- src/qemu/qemu_migration_params.c | 92 ++++++++++++++ src/qemu/qemu_migration_params.h | 17 +++ src/qemu/qemu_monitor.c | 37 ++++++ src/qemu/qemu_monitor.h | 5 + src/qemu/qemu_process.c | 100 ++++++++++----- src/qemu/qemu_process.h | 19 ++- src/qemu/qemu_saveimage.c | 120 ++++++++++++------ src/qemu/qemu_saveimage.h | 4 + src/qemu/qemu_snapshot.c | 15 ++- tools/virsh-domain.c | 81 ++++++++++-- 19 files changed, 775 insertions(+), 190 deletions(-) -- 2.43.0

When invoking virDomainSaveParams with a relative path, the image is saved to the daemon's CWD. Similarly, when providing virDomainRestoreParams with a relative path, it attempts to restore from the daemon's CWD. In most configurations, the daemon's CWD is set to '/'. Ensure a relative path is converted to absolute before invoking the driver domain{Save,Restore}Params functions. Signed-off-by: Jim Fehlig <jfehlig@suse.com> --- src/libvirt-domain.c | 89 ++++++++++++++++++++++++++++++++++++-------- 1 file changed, 73 insertions(+), 16 deletions(-) diff --git a/src/libvirt-domain.c b/src/libvirt-domain.c index 18451ebdb9..0ee7c6f45b 100644 --- a/src/libvirt-domain.c +++ b/src/libvirt-domain.c @@ -1023,6 +1023,11 @@ virDomainSaveParams(virDomainPtr domain, unsigned int flags) { virConnectPtr conn; + virTypedParameterPtr params_copy = NULL; + int nparams_copy = 0; + const char *to = NULL; + g_autofree char *absolute_to = NULL; + int ret = -1; VIR_DOMAIN_DEBUG(domain, "params=%p, nparams=%d, flags=0x%x", params, nparams, flags); @@ -1033,23 +1038,46 @@ virDomainSaveParams(virDomainPtr domain, virCheckDomainReturn(domain, -1); conn = domain->conn; - virCheckReadOnlyGoto(conn->flags, error); + virCheckReadOnlyGoto(conn->flags, done); VIR_EXCLUSIVE_FLAGS_GOTO(VIR_DOMAIN_SAVE_RUNNING, VIR_DOMAIN_SAVE_PAUSED, - error); + done); + + /* We must absolutize the file path as the save is done out of process */ + virTypedParamsCopy(¶ms_copy, params, nparams); + nparams_copy = nparams; + if (virTypedParamsGetString(params_copy, nparams_copy, + VIR_DOMAIN_SAVE_PARAM_FILE, &to) < 0) + goto done; + + if (to) { + if (!(absolute_to = g_canonicalize_filename(to, NULL))) { + virReportError(VIR_ERR_INTERNAL_ERROR, "%s", + _("could not build absolute output file path")); + goto done; + } + + if (virTypedParamsReplaceString(¶ms_copy, &nparams_copy, + VIR_DOMAIN_SAVE_PARAM_FILE, + absolute_to) < 0) + goto done; + } if (conn->driver->domainSaveParams) { - if 
(conn->driver->domainSaveParams(domain, params, nparams, flags) < 0) - goto error; - return 0; + if (conn->driver->domainSaveParams(domain, params_copy, nparams_copy, flags) < 0) + goto done; + ret = 0; + } else { + virReportUnsupportedError(); } - virReportUnsupportedError(); + done: + if (ret < 0) + virDispatchError(domain->conn); + virTypedParamsFree(params_copy, nparams_copy); - error: - virDispatchError(domain->conn); - return -1; + return ret; } @@ -1206,6 +1234,12 @@ virDomainRestoreParams(virConnectPtr conn, virTypedParameterPtr params, int nparams, unsigned int flags) { + virTypedParameterPtr params_copy = NULL; + int nparams_copy = 0; + const char *from = NULL; + g_autofree char *absolute_from = NULL; + int ret = -1; + VIR_DEBUG("conn=%p, params=%p, nparams=%d, flags=0x%x", conn, params, nparams, flags); VIR_TYPED_PARAMS_DEBUG(params, nparams); @@ -1213,23 +1247,46 @@ virDomainRestoreParams(virConnectPtr conn, virResetLastError(); virCheckConnectReturn(conn, -1); - virCheckReadOnlyGoto(conn->flags, error); + virCheckReadOnlyGoto(conn->flags, done); VIR_EXCLUSIVE_FLAGS_GOTO(VIR_DOMAIN_SAVE_RUNNING, VIR_DOMAIN_SAVE_PAUSED, - error); + done); if (conn->driver->domainRestoreParams) { + /* We must absolutize the file path as the save is done out of process */ + virTypedParamsCopy(¶ms_copy, params, nparams); + nparams_copy = nparams; + if (virTypedParamsGetString(params_copy, nparams_copy, + VIR_DOMAIN_SAVE_PARAM_FILE, &from) < 0) + goto done; + + if (from) { + if (!(absolute_from = g_canonicalize_filename(from, NULL))) { + virReportError(VIR_ERR_INTERNAL_ERROR, "%s", + _("could not build absolute output file path")); + goto done; + } + + if (virTypedParamsReplaceString(¶ms_copy, &nparams_copy, + VIR_DOMAIN_SAVE_PARAM_FILE, + absolute_from) < 0) + goto done; + } + if (conn->driver->domainRestoreParams(conn, params, nparams, flags) < 0) - goto error; - return 0; + goto done; + ret = 0; } virReportUnsupportedError(); - error: - virDispatchError(conn); - return 
-1; + done: + if (ret < 0) + virDispatchError(conn); + virTypedParamsFree(params_copy, nparams_copy); + + return ret; } -- 2.43.0

On Wed, Mar 05, 2025 at 03:48:10PM -0700, Jim Fehlig via Devel wrote:
When invoking virDomainSaveParams with a relative path, the image is saved to the daemon's CWD. Similarly, when providing virDomainRestoreParams with a relative path, it attempts to restore from the daemon's CWD. In most configurations, the daemon's CWD is set to '/'. Ensure a relative path is converted to absolute before invoking the driver domain{Save,Restore}Params functions.
Are you aware of any common usage of these APIs with a relative path ? Although we've not documented it, my general view is that all paths given to libvirt are expected to be absolute, everywhere - whether API parameters like these, or config file parameters, or XML elements/attributes. In the perfect world we would canonicalize all relative paths on the client side, but doing that is such an enormous & complex job it is not likely to be practical. We'd be better off just documenting use of relative paths as "out of scope" and / or "undefined behaviour"
Signed-off-by: Jim Fehlig <jfehlig@suse.com> --- src/libvirt-domain.c | 89 ++++++++++++++++++++++++++++++++++++-------- 1 file changed, 73 insertions(+), 16 deletions(-)
diff --git a/src/libvirt-domain.c b/src/libvirt-domain.c index 18451ebdb9..0ee7c6f45b 100644 --- a/src/libvirt-domain.c +++ b/src/libvirt-domain.c @@ -1023,6 +1023,11 @@ virDomainSaveParams(virDomainPtr domain, unsigned int flags) { virConnectPtr conn; + virTypedParameterPtr params_copy = NULL; + int nparams_copy = 0; + const char *to = NULL; + g_autofree char *absolute_to = NULL; + int ret = -1;
VIR_DOMAIN_DEBUG(domain, "params=%p, nparams=%d, flags=0x%x", params, nparams, flags); @@ -1033,23 +1038,46 @@ virDomainSaveParams(virDomainPtr domain, virCheckDomainReturn(domain, -1); conn = domain->conn;
- virCheckReadOnlyGoto(conn->flags, error); + virCheckReadOnlyGoto(conn->flags, done);
VIR_EXCLUSIVE_FLAGS_GOTO(VIR_DOMAIN_SAVE_RUNNING, VIR_DOMAIN_SAVE_PAUSED, - error); + done); + + /* We must absolutize the file path as the save is done out of process */ + virTypedParamsCopy(¶ms_copy, params, nparams); + nparams_copy = nparams; + if (virTypedParamsGetString(params_copy, nparams_copy, + VIR_DOMAIN_SAVE_PARAM_FILE, &to) < 0) + goto done; + + if (to) { + if (!(absolute_to = g_canonicalize_filename(to, NULL))) { + virReportError(VIR_ERR_INTERNAL_ERROR, "%s", + _("could not build absolute output file path")); + goto done; + } + + if (virTypedParamsReplaceString(¶ms_copy, &nparams_copy, + VIR_DOMAIN_SAVE_PARAM_FILE, + absolute_to) < 0) + goto done; + }
if (conn->driver->domainSaveParams) { - if (conn->driver->domainSaveParams(domain, params, nparams, flags) < 0) - goto error; - return 0; + if (conn->driver->domainSaveParams(domain, params_copy, nparams_copy, flags) < 0) + goto done; + ret = 0; + } else { + virReportUnsupportedError(); }
- virReportUnsupportedError(); + done: + if (ret < 0) + virDispatchError(domain->conn); + virTypedParamsFree(params_copy, nparams_copy);
- error: - virDispatchError(domain->conn); - return -1; + return ret; }
@@ -1206,6 +1234,12 @@ virDomainRestoreParams(virConnectPtr conn, virTypedParameterPtr params, int nparams, unsigned int flags) { + virTypedParameterPtr params_copy = NULL; + int nparams_copy = 0; + const char *from = NULL; + g_autofree char *absolute_from = NULL; + int ret = -1; + VIR_DEBUG("conn=%p, params=%p, nparams=%d, flags=0x%x", conn, params, nparams, flags); VIR_TYPED_PARAMS_DEBUG(params, nparams); @@ -1213,23 +1247,46 @@ virDomainRestoreParams(virConnectPtr conn, virResetLastError();
virCheckConnectReturn(conn, -1); - virCheckReadOnlyGoto(conn->flags, error); + virCheckReadOnlyGoto(conn->flags, done);
VIR_EXCLUSIVE_FLAGS_GOTO(VIR_DOMAIN_SAVE_RUNNING, VIR_DOMAIN_SAVE_PAUSED, - error); + done);
if (conn->driver->domainRestoreParams) { + /* We must absolutize the file path as the save is done out of process */ + virTypedParamsCopy(¶ms_copy, params, nparams); + nparams_copy = nparams; + if (virTypedParamsGetString(params_copy, nparams_copy, + VIR_DOMAIN_SAVE_PARAM_FILE, &from) < 0) + goto done; + + if (from) { + if (!(absolute_from = g_canonicalize_filename(from, NULL))) { + virReportError(VIR_ERR_INTERNAL_ERROR, "%s", + _("could not build absolute output file path")); + goto done; + } + + if (virTypedParamsReplaceString(¶ms_copy, &nparams_copy, + VIR_DOMAIN_SAVE_PARAM_FILE, + absolute_from) < 0) + goto done; + } + if (conn->driver->domainRestoreParams(conn, params, nparams, flags) < 0) - goto error; - return 0; + goto done; + ret = 0; }
virReportUnsupportedError();
- error: - virDispatchError(conn); - return -1; + done: + if (ret < 0) + virDispatchError(conn); + virTypedParamsFree(params_copy, nparams_copy); + + return ret; }
-- 2.43.0
With regards, Daniel -- |: https://berrange.com -o- https://www.flickr.com/photos/dberrange :| |: https://libvirt.org -o- https://fstop138.berrange.com :| |: https://entangle-photo.org -o- https://www.instagram.com/dberrange :|

On 3/19/25 05:54, Daniel P. Berrangé wrote:
On Wed, Mar 05, 2025 at 03:48:10PM -0700, Jim Fehlig via Devel wrote:
When invoking virDomainSaveParams with a relative path, the image is saved to the daemon's CWD. Similarly, when providing virDomainRestoreParams with a relative path, it attempts to restore from the daemon's CWD. In most configurations, the daemon's CWD is set to '/'. Ensure a relative path is converted to absolute before invoking the driver domain{Save,Restore}Params functions.
Are you aware of any common usage of these APIs with a relative path ?
No. I added this patch to the series after receiving feedback from testers that included "Providing a relative path to the location of the saved VM does not work". E.g. something like # virsh restore --bypass-cache test/test-vm.sav error: Failed to restore domain from test/test-vm.sav error: Failed to open file 'test/test-vm.sav': No such file or directory"
Although we've not documented it, my general view is that all paths given to libvirt are expected to be absolute, everywhere - whether API parameters like these, or config file parmeters, or XML elements/attributes.
Agreed. Relative paths from (remote) clients are ambiguous on the server. I'm fine dropping this patch from the series.
In the perfect world we would canonicalize all relative paths on the client side, but doing that is such an enourmous & complex job it is not likely to be practical. We'd be better off just documenting use of relative paths as "out of scope" and / or "undefined behaviour"
I'll take a stab at improving the documentation. Regards, Jim

On Wed, Mar 19, 2025 at 05:43:20PM -0600, Jim Fehlig via Devel wrote:
On 3/19/25 05:54, Daniel P. Berrangé wrote:
On Wed, Mar 05, 2025 at 03:48:10PM -0700, Jim Fehlig via Devel wrote:
When invoking virDomainSaveParams with a relative path, the image is saved to the daemon's CWD. Similarly, when providing virDomainRestoreParams with a relative path, it attempts to restore from the daemon's CWD. In most configurations, the daemon's CWD is set to '/'. Ensure a relative path is converted to absolute before invoking the driver domain{Save,Restore}Params functions.
Are you aware of any common usage of these APIs with a relative path ?
No. I added this patch to the series after receiving feedback from testers that included "Providing a relative path to the location of the saved VM does not work". E.g. something like
# virsh restore --bypass-cache test/test-vm.sav error: Failed to restore domain from test/test-vm.sav error: Failed to open file 'test/test-vm.sav': No such file or directory"
Although we've not documented it, my general view is that all paths given to libvirt are expected to be absolute, everywhere - whether API parameters like these, or config file parmeters, or XML elements/attributes.
Agreed. Relative paths from (remote) clients are ambiguous on the server. I'm fine dropping this patch from the series.
In the perfect world we would canonicalize all relative paths on the client side, but doing that is such an enourmous & complex job it is not likely to be practical. We'd be better off just documenting use of relative paths as "out of scope" and / or "undefined behaviour"
I'll take a stab at improving the documentation.
So this actually breaks an existing usage, albeit from a simple user (e.g. myself). And because later patches switch from virDomain{Save,Restore}Flags to virDomain{Save,Restore}Params, this might be seen as a regression. If we do not want to canonicalize the paths, then we should error out on relative ones. Without that the current behaviour is not only what's described above (and how I noticed it), but also the following two commands: virsh save domain asdf.img virsh save --image-format raw asdf.img will behave differently. The first one saves the image in CWD of virsh, the second one will save the same image in CWD of virtqemud (or respective daemon), without any error or indication of that happening. Consequently, `virsh restore` will restore from a different file based on whether there are extra flags/parameters or not. Either we need to disallow relative paths or canonicalize them on the client, and ideally before the release.
Regards, Jim

On Tue, Mar 25, 2025 at 10:49:49AM +0100, Martin Kletzander wrote:
On Wed, Mar 19, 2025 at 05:43:20PM -0600, Jim Fehlig via Devel wrote:
On 3/19/25 05:54, Daniel P. Berrangé wrote:
On Wed, Mar 05, 2025 at 03:48:10PM -0700, Jim Fehlig via Devel wrote:
When invoking virDomainSaveParams with a relative path, the image is saved to the daemon's CWD. Similarly, when providing virDomainRestoreParams with a relative path, it attempts to restore from the daemon's CWD. In most configurations, the daemon's CWD is set to '/'. Ensure a relative path is converted to absolute before invoking the driver domain{Save,Restore}Params functions.
Are you aware of any common usage of these APIs with a relative path ?
No. I added this patch to the series after receiving feedback from testers that included "Providing a relative path to the location of the saved VM does not work". E.g. something like
# virsh restore --bypass-cache test/test-vm.sav error: Failed to restore domain from test/test-vm.sav error: Failed to open file 'test/test-vm.sav': No such file or directory"
Although we've not documented it, my general view is that all paths given to libvirt are expected to be absolute, everywhere - whether API parameters like these, or config file parmeters, or XML elements/attributes.
Agreed. Relative paths from (remote) clients are ambiguous on the server. I'm fine dropping this patch from the series.
In the perfect world we would canonicalize all relative paths on the client side, but doing that is such an enourmous & complex job it is not likely to be practical. We'd be better off just documenting use of relative paths as "out of scope" and / or "undefined behaviour"
I'll take a stab at improving the documentation.
So this actually breaks an existing usage, albeit from a simple user (e.g. myself). And because later patches switch from virDomain{Save,Restore}Flags to virDomain{Save,Restore}Params, this might be seen as a regression. If we do not want to canonicalize the paths, then we should error out on relative ones. Without that the current behaviour is not only what's described above (and how I noticed it), but also the following two commands:
virsh save domain asdf.img virsh save --image-format raw asdf.img
will behave differently. The first one saves the image in CWD of virsh, the second one will save the same image in CWD of virtqemud (or respective daemon), without any error or indication of that happening. Consequently, `virsh restore` will restore from a different file based on whether there are extra flags/parameters or not.
Urgh, right, I missed that we already have canonicalization in the virDomainSave method.
Either we need to disallow relative paths or canonicalize them on the client, and ideally before the release.
Yeah, reluctantly, we need to preserve that behaviour in the newer virDomainSaveParams, to give consistent behaviour. With regards, Daniel -- |: https://berrange.com -o- https://www.flickr.com/photos/dberrange :| |: https://libvirt.org -o- https://fstop138.berrange.com :| |: https://entangle-photo.org -o- https://www.instagram.com/dberrange :|

On Tue, Mar 25, 2025 at 10:15:12AM +0000, Daniel P. Berrangé wrote:
On Tue, Mar 25, 2025 at 10:49:49AM +0100, Martin Kletzander wrote:
On Wed, Mar 19, 2025 at 05:43:20PM -0600, Jim Fehlig via Devel wrote:
On 3/19/25 05:54, Daniel P. Berrangé wrote:
On Wed, Mar 05, 2025 at 03:48:10PM -0700, Jim Fehlig via Devel wrote:
When invoking virDomainSaveParams with a relative path, the image is saved to the daemon's CWD. Similarly, when providing virDomainRestoreParams with a relative path, it attempts to restore from the daemon's CWD. In most configurations, the daemon's CWD is set to '/'. Ensure a relative path is converted to absolute before invoking the driver domain{Save,Restore}Params functions.
Are you aware of any common usage of these APIs with a relative path ?
No. I added this patch to the series after receiving feedback from testers that included "Providing a relative path to the location of the saved VM does not work". E.g. something like
# virsh restore --bypass-cache test/test-vm.sav error: Failed to restore domain from test/test-vm.sav error: Failed to open file 'test/test-vm.sav': No such file or directory"
Although we've not documented it, my general view is that all paths given to libvirt are expected to be absolute, everywhere - whether API parameters like these, or config file parmeters, or XML elements/attributes.
Agreed. Relative paths from (remote) clients are ambiguous on the server. I'm fine dropping this patch from the series.
In the perfect world we would canonicalize all relative paths on the client side, but doing that is such an enourmous & complex job it is not likely to be practical. We'd be better off just documenting use of relative paths as "out of scope" and / or "undefined behaviour"
I'll take a stab at improving the documentation.
So this actually breaks an existing usage, albeit from a simple user (e.g. myself). And because later patches switch from virDomain{Save,Restore}Flags to virDomain{Save,Restore}Params, this might be seen as a regression. If we do not want to canonicalize the paths, then we should error out on relative ones. Without that the current behaviour is not only what's described above (and how I noticed it), but also the following two commands:
virsh save domain asdf.img virsh save --image-format raw asdf.img
will behave differently. The first one saves the image in CWD of virsh, the second one will save the same image in CWD of virtqemud (or respective daemon), without any error or indication of that happening. Consequently, `virsh restore` will restore from a different file based on whether there are extra flags/parameters or not.
Urgh, right, I missed that we already have canonicalization in the virDomainSave method.
Either we need to disallow relative paths or canonicalize them on the client, and ideally before the release.
Yeah, reluctantly, we need to preserve that behaviour in the newer virDomainSaveParams, to give consistent behaviour.
Yes, that behaviour is a relic of the past that we now need to live with. But that's the reason I'd give my Reviewed-by: Martin Kletzander <mkletzan@redhat.com>
With regards, Daniel -- |: https://berrange.com -o- https://www.flickr.com/photos/dberrange :| |: https://libvirt.org -o- https://fstop138.berrange.com :| |: https://entangle-photo.org -o- https://www.instagram.com/dberrange :|

On 3/25/25 06:49, Martin Kletzander wrote:
On Tue, Mar 25, 2025 at 10:15:12AM +0000, Daniel P. Berrangé wrote:
On Tue, Mar 25, 2025 at 10:49:49AM +0100, Martin Kletzander wrote:
On 3/19/25 05:54, Daniel P. Berrangé wrote:
On Wed, Mar 05, 2025 at 03:48:10PM -0700, Jim Fehlig via Devel wrote:
When invoking virDomainSaveParams with a relative path, the image is saved to the daemon's CWD. Similarly, when providing virDomainRestoreParams with a relative path, it attempts to restore from the daemon's CWD. In most configurations, the daemon's CWD is set to '/'. Ensure a relative path is converted to absolute before invoking the driver domain{Save,Restore} Params functions.
Are you aware of any common usage of these APIs with a relative path ?
No. I added this patch to the series after receiving feedback from testers
On Wed, Mar 19, 2025 at 05:43:20PM -0600, Jim Fehlig via Devel wrote: that
included "Providing a relative path to the location of the saved VM does not work". E.g. something like
# virsh restore --bypass-cache test/test-vm.sav error: Failed to restore domain from test/test-vm.sav error: Failed to open file 'test/test-vm.sav': No such file or directory"
Although we've not documented it, my general view is that all paths given to libvirt are expected to be absolute, everywhere - whether API parameters like these, or config file parmeters, or XML elements/attributes.
Agreed. Relative paths from (remote) clients are ambiguous on the server. I'm fine dropping this patch from the series.
In the perfect world we would canonicalize all relative paths on the client side, but doing that is such an enourmous & complex job it is not likely to be practical. We'd be better off just documenting use of relative paths as "out of scope" and / or "undefined behaviour"
I'll take a stab at improving the documentation.
So this actually breaks an existing usage, albeit from a simple user (e.g. myself). And because later patches switch from virDomain{Save,Restore}Flags to virDomain{Save,Restore}Params, this might be seen as a regression. If we do not want to canonicalize the paths, then we should error out on relative ones. Without that the current behaviour is not only what's described above (and how I noticed it), but also the following two commands:
virsh save domain asdf.img virsh save --image-format raw asdf.img
will behave differently. The first one saves the image in CWD of virsh, the second one will save the same image in CWD of virtqemud (or respective daemon), without any error or indication of that happening. Consequently, `virsh restore` will restore from a different file based on whether there are extra flags/parameters or not.
Urgh, right, I missed that we already have canonicalization in the virDomainSave method.
I forgot about that as well when deciding to drop this patch from the series.
Either we need to disallow relative paths or canonicalize them on the client, and ideally before the release.
Yeah, reluctantly, we need to preserve that behaviour in the newer virDomainSaveParams, to give consistent behaviour.
Yes, that behaviour is a relic of the past that we now need to live with. But that's the reason I'd give my
Reviewed-by: Martin Kletzander <mkletzan@redhat.com>
Thanks. Is there agreement I should push this before the release? Regards, Jim

On Tue, Mar 25, 2025 at 08:41:41AM -0600, Jim Fehlig wrote:
On 3/25/25 06:49, Martin Kletzander wrote:
On Tue, Mar 25, 2025 at 10:15:12AM +0000, Daniel P. Berrangé wrote:
On Tue, Mar 25, 2025 at 10:49:49AM +0100, Martin Kletzander wrote:
On 3/19/25 05:54, Daniel P. Berrangé wrote:
On Wed, Mar 05, 2025 at 03:48:10PM -0700, Jim Fehlig via Devel wrote: > When invoking virDomainSaveParams with a relative path, the image is > saved to the daemon's CWD. Similarly, when providing virDomainRestoreParams > with a relative path, it attempts to restore from the daemon's CWD. In most > configurations, the daemon's CWD is set to '/'. Ensure a relative path is > converted to absolute before invoking the driver domain{Save,Restore} Params > functions.
Are you aware of any common usage of these APIs with a relative path ?
No. I added this patch to the series after receiving feedback from testers
On Wed, Mar 19, 2025 at 05:43:20PM -0600, Jim Fehlig via Devel wrote: that
included "Providing a relative path to the location of the saved VM does not work". E.g. something like
# virsh restore --bypass-cache test/test-vm.sav error: Failed to restore domain from test/test-vm.sav error: Failed to open file 'test/test-vm.sav': No such file or directory"
Although we've not documented it, my general view is that all paths given to libvirt are expected to be absolute, everywhere - whether API parameters like these, or config file parmeters, or XML elements/attributes.
Agreed. Relative paths from (remote) clients are ambiguous on the server. I'm fine dropping this patch from the series.
In the perfect world we would canonicalize all relative paths on the client side, but doing that is such an enourmous & complex job it is not likely to be practical. We'd be better off just documenting use of relative paths as "out of scope" and / or "undefined behaviour"
I'll take a stab at improving the documentation.
So this actually breaks an existing usage, albeit from a simple user (e.g. myself). And because later patches switch from virDomain{Save,Restore}Flags to virDomain{Save,Restore}Params, this might be seen as a regression. If we do not want to canonicalize the paths, then we should error out on relative ones. Without that the current behaviour is not only what's described above (and how I noticed it), but also the following two commands:
virsh save domain asdf.img virsh save --image-format raw asdf.img
will behave differently. The first one saves the image in CWD of virsh, the second one will save the same image in CWD of virtqemud (or respective daemon), without any error or indication of that happening. Consequently, `virsh restore` will restore from a different file based on whether there are extra flags/parameters or not.
Urgh, right, I missed that we already have canonicalization in the virDomainSave method.
I forgot about that as well when deciding to drop this patch from the series.
Either we need to disallow relative paths or canonicalize them on the client, and ideally before the release.
Yeah, reluctantly, we need to preserve that behaviour in the newer virDomainSaveParams, to give consistent behaviour.
Yes, that behaviour is a relic of the past that we now need to live with. But that's the reason I'd give my
Reviewed-by: Martin Kletzander <mkletzan@redhat.com>
Thanks. Is there agreement I should push this before the release?
From my side I'm for it; @Daniel?
Regards, Jim

On Tue, Mar 25, 2025 at 04:33:03PM +0100, Martin Kletzander wrote:
On Tue, Mar 25, 2025 at 08:41:41AM -0600, Jim Fehlig wrote:
On 3/25/25 06:49, Martin Kletzander wrote:
On Tue, Mar 25, 2025 at 10:15:12AM +0000, Daniel P. Berrangé wrote:
On Tue, Mar 25, 2025 at 10:49:49AM +0100, Martin Kletzander wrote:
On 3/19/25 05:54, Daniel P. Berrangé wrote: > On Wed, Mar 05, 2025 at 03:48:10PM -0700, Jim Fehlig via Devel wrote: > > When invoking virDomainSaveParams with a relative path, the image is > > saved to the daemon's CWD. Similarly, when providing virDomainRestoreParams > > with a relative path, it attempts to restore from the daemon's CWD. In most > > configurations, the daemon's CWD is set to '/'. Ensure a relative path is > > converted to absolute before invoking the driver domain{Save,Restore} Params > > functions. > > Are you aware of any common usage of these APIs with a relative path ?
No. I added this patch to the series after receiving feedback from testers
On Wed, Mar 19, 2025 at 05:43:20PM -0600, Jim Fehlig via Devel wrote: that
included "Providing a relative path to the location of the saved VM does not work". E.g. something like
# virsh restore --bypass-cache test/test-vm.sav error: Failed to restore domain from test/test-vm.sav error: Failed to open file 'test/test-vm.sav': No such file or directory"
> Although we've not documented it, my general view is that all paths given > to libvirt are expected to be absolute, everywhere - whether API parameters > like these, or config file parmeters, or XML elements/attributes.
Agreed. Relative paths from (remote) clients are ambiguous on the server. I'm fine dropping this patch from the series.
> In the perfect world we would canonicalize all relative paths on the > client side, but doing that is such an enourmous & complex job it is > not likely to be practical. We'd be better off just documenting use > of relative paths as "out of scope" and / or "undefined behaviour"
I'll take a stab at improving the documentation.
So this actually breaks an existing usage, albeit from a simple user (e.g. myself). And because later patches switch from virDomain{Save,Restore}Flags to virDomain{Save,Restore}Params, this might be seen as a regression. If we do not want to canonicalize the paths, then we should error out on relative ones. Without that the current behaviour is not only what's described above (and how I noticed it), but also the following two commands:
virsh save domain asdf.img virsh save --image-format raw asdf.img
will behave differently. The first one saves the image in CWD of virsh, the second one will save the same image in CWD of virtqemud (or respective daemon), without any error or indication of that happening. Consequently, `virsh restore` will restore from a different file based on whether there are extra flags/parameters or not.
Urgh, right, I missed that we already have canonicalization in the virDomainSave method.
I forgot about that as well when deciding to drop this patch from the series.
Either we need to disallow relative paths or canonicalize them on the client, and ideally before the release.
Yeah, reluctantly, we need to preserve that behaviour in the newer virDomainSaveParams, to give consistent behaviour.
Yes, that behaviour is a relic of the past that we now need to live with. But that's the reason I'd give my
Reviewed-by: Martin Kletzander <mkletzan@redhat.com>
Thanks. Is there agreement I should push this before the release?
From my side I'm for it; @Daniel?
Yes, go for it. With regards, Daniel -- |: https://berrange.com -o- https://www.flickr.com/photos/dberrange :| |: https://libvirt.org -o- https://fstop138.berrange.com :| |: https://entangle-photo.org -o- https://www.instagram.com/dberrange :|

Add new function qemuFDPassNewFromMonitor to get an fdset previously passed to qemu, based on the 'prefix' provided when the qemuFDPass object was initially created. Signed-off-by: Jim Fehlig <jfehlig@suse.com> --- src/qemu/qemu_fd.c | 46 ++++++++++++++++++++++++++++++++++++++++++++++ src/qemu/qemu_fd.h | 4 ++++ 2 files changed, 50 insertions(+) diff --git a/src/qemu/qemu_fd.c b/src/qemu/qemu_fd.c index e847056573..333f9b128e 100644 --- a/src/qemu/qemu_fd.c +++ b/src/qemu/qemu_fd.c @@ -115,6 +115,52 @@ qemuFDPassNewPassed(unsigned int fdSetID) } +/** + * qemuFDPassNewFromMonitor: + * @prefix: Prefix of an FDset which was already passed to qemu + * @mon: monitor object + * + * Query the monitor for already passed FDsets and return a new qemuFDPass + * object if one is found to contain an fd with associated @prefix. Returns + * NULL on failure or if no matching FDset is found. Similar to + * qemuFDPassNewPassed, this is useful along with qemuFDPassTransferMonitorRollback + * when only knowing the qemuFDPass prefix. 
+ */ +qemuFDPass * +qemuFDPassNewFromMonitor(const char *prefix, qemuMonitor *mon) +{ + g_autoptr(qemuMonitorFdsets) fdsets = NULL; + qemuFDPass *fdpass = NULL; + size_t i; + + VIR_DEBUG("prefix = %s", prefix); + + if (qemuMonitorQueryFdsets(mon, &fdsets) < 0) + return NULL; + + for (i = 0; i < fdsets->nfdsets; i++) { + qemuMonitorFdsetInfo fdset = fdsets->fdsets[i]; + size_t j; + + for (j = 0; j < fdset.nfds; j++) { + qemuMonitorFdsetFdInfo fdinfo = fdset.fds[j]; + + VIR_DEBUG("fdinfo opaque = %s", fdinfo.opaque); + if (STRPREFIX(prefix, fdinfo.opaque)) { + fdpass = g_new0(qemuFDPass, 1); + + fdpass->fdSetID = fdset.id; + fdpass->prefix = g_strdup(prefix); + fdpass->path = g_strdup_printf("/dev/fdset/%u", fdset.id); + fdpass->passed = true; + } + } + } + + return fdpass; +} + + /** * qemuFDPassIsPassed: * @fdpass: The fd passing helper struct diff --git a/src/qemu/qemu_fd.h b/src/qemu/qemu_fd.h index cd0ff2c690..d873e110a8 100644 --- a/src/qemu/qemu_fd.h +++ b/src/qemu/qemu_fd.h @@ -34,6 +34,10 @@ qemuFDPassNew(const char *prefix, qemuFDPass * qemuFDPassNewPassed(unsigned int fdSetID); +qemuFDPass * +qemuFDPassNewFromMonitor(const char *prefix, qemuMonitor *mon) + ATTRIBUTE_NONNULL(1); + bool qemuFDPassIsPassed(qemuFDPass *fdpass, unsigned *id); -- 2.43.0

On Wed, Mar 05, 2025 at 03:48:11PM -0700, Jim Fehlig via Devel wrote:
Add new function qemuFDPassNewFromMonitor to get an fdset previously passed to qemu, based on the 'prefix' provided when the qemuFDPass object was initially created.
Signed-off-by: Jim Fehlig <jfehlig@suse.com> --- src/qemu/qemu_fd.c | 46 ++++++++++++++++++++++++++++++++++++++++++++++ src/qemu/qemu_fd.h | 4 ++++ 2 files changed, 50 insertions(+)
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com> With regards, Daniel -- |: https://berrange.com -o- https://www.flickr.com/photos/dberrange :| |: https://libvirt.org -o- https://fstop138.berrange.com :| |: https://entangle-photo.org -o- https://www.instagram.com/dberrange :|

Add new function qemuMigrationParamsCapEnabled() to check if a capability is set in the caller-provided migration parameters. Signed-off-by: Jim Fehlig <jfehlig@suse.com> Reviewed-by: Daniel P. Berrangé <berrange@redhat.com> --- src/qemu/qemu_migration_params.c | 16 ++++++++++++++++ src/qemu/qemu_migration_params.h | 4 ++++ 2 files changed, 20 insertions(+) diff --git a/src/qemu/qemu_migration_params.c b/src/qemu/qemu_migration_params.c index c10660d6f2..252437e5b2 100644 --- a/src/qemu/qemu_migration_params.c +++ b/src/qemu/qemu_migration_params.c @@ -1312,6 +1312,22 @@ qemuMigrationParamsGetULL(qemuMigrationParams *migParams, } +/** + * Returns true if @cap is enabled in @migParams, false otherwise. + */ +bool +qemuMigrationParamsCapEnabled(qemuMigrationParams *migParams, + qemuMigrationCapability cap) +{ + bool enabled = false; + + if (migParams) + ignore_value(virBitmapGetBit(migParams->caps, cap, &enabled)); + + return enabled; +} + + /** * qemuMigrationParamsCheck: * diff --git a/src/qemu/qemu_migration_params.h b/src/qemu/qemu_migration_params.h index 17fc63f527..da62ca5b8f 100644 --- a/src/qemu/qemu_migration_params.h +++ b/src/qemu/qemu_migration_params.h @@ -137,6 +137,10 @@ qemuMigrationParamsGetULL(qemuMigrationParams *migParams, qemuMigrationParam param, unsigned long long *value); +bool +qemuMigrationParamsCapEnabled(qemuMigrationParams *migParams, + qemuMigrationCapability cap); + void qemuMigrationParamsSetBlockDirtyBitmapMapping(qemuMigrationParams *migParams, virJSONValue **params); -- 2.43.0

Signed-off-by: Jim Fehlig <jfehlig@suse.com> Reviewed-by: Daniel P. Berrangé <berrange@redhat.com> --- src/qemu/qemu_migration_params.c | 18 ++++++++++++++++++ src/qemu/qemu_migration_params.h | 5 +++++ 2 files changed, 23 insertions(+) diff --git a/src/qemu/qemu_migration_params.c b/src/qemu/qemu_migration_params.c index 252437e5b2..939d9f01e4 100644 --- a/src/qemu/qemu_migration_params.c +++ b/src/qemu/qemu_migration_params.c @@ -1312,6 +1312,24 @@ qemuMigrationParamsGetULL(qemuMigrationParams *migParams, } +int +qemuMigrationParamsGetBool(qemuMigrationParams *migParams, + qemuMigrationParam param, + bool *value) +{ + if (!migParams || !value) + return 0; + + if (qemuMigrationParamsCheckType(param, QEMU_MIGRATION_PARAM_TYPE_BOOL) < 0) + return -1; + + *value = migParams->params[param].set ? + migParams->params[param].value.b : false; + + return 0; +} + + /** * Returns true if @cap is enabled in @migParams, false otherwise. */ diff --git a/src/qemu/qemu_migration_params.h b/src/qemu/qemu_migration_params.h index da62ca5b8f..fb2cfd9fad 100644 --- a/src/qemu/qemu_migration_params.h +++ b/src/qemu/qemu_migration_params.h @@ -137,6 +137,11 @@ qemuMigrationParamsGetULL(qemuMigrationParams *migParams, qemuMigrationParam param, unsigned long long *value); +int +qemuMigrationParamsGetBool(qemuMigrationParams *migParams, + qemuMigrationParam param, + bool *value); + bool qemuMigrationParamsCapEnabled(qemuMigrationParams *migParams, qemuMigrationCapability cap); -- 2.43.0

Add the mapped-ram migration capability introduced in QEMU 9.0. Reviewed-by: Daniel P. Berrangé <berrange@redhat.com> Signed-off-by: Jim Fehlig <jfehlig@suse.com> --- src/qemu/qemu_migration_params.c | 1 + src/qemu/qemu_migration_params.h | 1 + 2 files changed, 2 insertions(+) diff --git a/src/qemu/qemu_migration_params.c b/src/qemu/qemu_migration_params.c index 939d9f01e4..4fc9199f1c 100644 --- a/src/qemu/qemu_migration_params.c +++ b/src/qemu/qemu_migration_params.c @@ -106,6 +106,7 @@ VIR_ENUM_IMPL(qemuMigrationCapability, "zero-copy-send", "postcopy-preempt", "switchover-ack", + "mapped-ram", ); diff --git a/src/qemu/qemu_migration_params.h b/src/qemu/qemu_migration_params.h index fb2cfd9fad..2379950ce9 100644 --- a/src/qemu/qemu_migration_params.h +++ b/src/qemu/qemu_migration_params.h @@ -42,6 +42,7 @@ typedef enum { QEMU_MIGRATION_CAP_ZERO_COPY_SEND, QEMU_MIGRATION_CAP_POSTCOPY_PREEMPT, QEMU_MIGRATION_CAP_SWITCHOVER_ACK, + QEMU_MIGRATION_CAP_MAPPED_RAM, QEMU_MIGRATION_CAP_LAST } qemuMigrationCapability; -- 2.43.0

Introduce qemuMigrationParamsForSave() to create a qemuMigrationParams object initialized with appropriate migration capabilities and parameters for a save operation. Note that mapped-ram capability also requires the multifd capability. For now, the number of multifd channels is set to 1. Future work to support parallel save/restore can set the number of channels to a user-specified value. Signed-off-by: Jim Fehlig <jfehlig@suse.com> Reviewed-by: Daniel P. Berrangé <berrange@redhat.com> --- src/qemu/qemu_migration.c | 2 +- src/qemu/qemu_migration_params.c | 21 +++++++++++++++++++++ src/qemu/qemu_migration_params.h | 3 +++ 3 files changed, 25 insertions(+), 1 deletion(-) diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c index e1399806d5..8753fb5358 100644 --- a/src/qemu/qemu_migration.c +++ b/src/qemu/qemu_migration.c @@ -7139,7 +7139,7 @@ qemuMigrationSrcToFile(virQEMUDriver *driver, virDomainObj *vm, /* Increase migration bandwidth to unlimited since target is a file. * Failure to change migration speed is not fatal. 
*/ - if (!(migParams = qemuMigrationParamsNew())) + if (!(migParams = qemuMigrationParamsForSave(false))) return -1; if (qemuMigrationParamsSetULL(migParams, diff --git a/src/qemu/qemu_migration_params.c b/src/qemu/qemu_migration_params.c index 4fc9199f1c..d48cdd5506 100644 --- a/src/qemu/qemu_migration_params.c +++ b/src/qemu/qemu_migration_params.c @@ -792,6 +792,27 @@ qemuMigrationParamsFromFlags(virTypedParameterPtr params, } +qemuMigrationParams * +qemuMigrationParamsForSave(bool sparse) +{ + g_autoptr(qemuMigrationParams) saveParams = NULL; + + if (!(saveParams = qemuMigrationParamsNew())) + return NULL; + + if (sparse) { + if (virBitmapSetBit(saveParams->caps, QEMU_MIGRATION_CAP_MAPPED_RAM) < 0) + return NULL; + if (virBitmapSetBit(saveParams->caps, QEMU_MIGRATION_CAP_MULTIFD) < 0) + return NULL; + saveParams->params[QEMU_MIGRATION_PARAM_MULTIFD_CHANNELS].value.i = 1; + saveParams->params[QEMU_MIGRATION_PARAM_MULTIFD_CHANNELS].set = true; + } + + return g_steal_pointer(&saveParams); +} + + int qemuMigrationParamsDump(qemuMigrationParams *migParams, virTypedParameterPtr *params, diff --git a/src/qemu/qemu_migration_params.h b/src/qemu/qemu_migration_params.h index 2379950ce9..88a1bc1a66 100644 --- a/src/qemu/qemu_migration_params.h +++ b/src/qemu/qemu_migration_params.h @@ -87,6 +87,9 @@ qemuMigrationParamsFromFlags(virTypedParameterPtr params, unsigned int flags, qemuMigrationParty party); +qemuMigrationParams * +qemuMigrationParamsForSave(bool sparse); + int qemuMigrationParamsDump(qemuMigrationParams *migParams, virTypedParameterPtr *params, -- 2.43.0

Extend the list of formats to include "sparse", which uses QEMU's mapped-ram stream format [1] to write guest memory blocks at fixed offsets in the save image file. [1] https://gitlab.com/qemu-project/qemu/-/blob/master/docs/devel/migration/mapp... Signed-off-by: Jim Fehlig <jfehlig@suse.com> --- src/qemu/qemu.conf.in | 9 ++++++++- src/qemu/qemu_saveimage.c | 1 + src/qemu/qemu_saveimage.h | 1 + 3 files changed, 10 insertions(+), 1 deletion(-) diff --git a/src/qemu/qemu.conf.in b/src/qemu/qemu.conf.in index 31172303dc..44ff658ad6 100644 --- a/src/qemu/qemu.conf.in +++ b/src/qemu/qemu.conf.in @@ -588,11 +588,18 @@ # disk space when saving large memory guests. Various compression formats are # available for specifying a save image compressed by the named algorithm. # Supported compression formats are "zstd", "lzop", "gzip", "bzip2", and "xz". +# The "sparse" format uses QEMU's mapped-ram stream format to write guest memory +# blocks at fixed offsets in the save image file. # save_image_format can be used to select the desired save format. "raw" is # the traditional format used by libvirt and is also the default. The # compression formats can be used to save disk space, although this typically -# results in longer save and restore times. +# results in longer save and restore times. The "sparse" format results in a +# save image file that is roughly the logical size of the guest's memory, +# although on-disk size is a function of guest memory usage. The "sparse" +# format is useful when a predictable maximum save image file size is +# needed. The other formats can result in a save image file much larger +# than guest memory if the guest runs a memory intensive workload. # # save_image_format is used with 'virsh save' or 'virsh managedsave'. 
It is # an error if the specified save_image_format is not valid, or cannot be diff --git a/src/qemu/qemu_saveimage.c b/src/qemu/qemu_saveimage.c index d940bfb5c3..29b4e39879 100644 --- a/src/qemu/qemu_saveimage.c +++ b/src/qemu/qemu_saveimage.c @@ -45,6 +45,7 @@ VIR_ENUM_IMPL(qemuSaveFormat, "xz", "lzop", "zstd", + "sparse", ); static inline void diff --git a/src/qemu/qemu_saveimage.h b/src/qemu/qemu_saveimage.h index 58f8252c8a..0411f3140a 100644 --- a/src/qemu/qemu_saveimage.h +++ b/src/qemu/qemu_saveimage.h @@ -42,6 +42,7 @@ typedef enum { QEMU_SAVE_FORMAT_XZ = 3, QEMU_SAVE_FORMAT_LZOP = 4, QEMU_SAVE_FORMAT_ZSTD = 5, + QEMU_SAVE_FORMAT_SPARSE = 6, /* Note: add new members only at the end. These values are used in the on-disk format. Do not change or re-use numbers. */ -- 2.43.0

On Wed, Mar 05, 2025 at 03:48:16PM -0700, Jim Fehlig via Devel wrote:
Extend the list of formats to include "sparse", which uses QEMU's mapped-ram stream format [1] to write guest memory blocks at fixed offsets in the save image file.
[1] https://gitlab.com/qemu-project/qemu/-/blob/master/docs/devel/migration/mapp...
Signed-off-by: Jim Fehlig <jfehlig@suse.com> --- src/qemu/qemu.conf.in | 9 ++++++++- src/qemu/qemu_saveimage.c | 1 + src/qemu/qemu_saveimage.h | 1 + 3 files changed, 10 insertions(+), 1 deletion(-)
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com> With regards, Daniel -- |: https://berrange.com -o- https://www.flickr.com/photos/dberrange :| |: https://libvirt.org -o- https://fstop138.berrange.com :| |: https://entangle-photo.org -o- https://www.instagram.com/dberrange :|

Move the code in qemuSaveImageCreate that opens, labels, and wraps the save image fd to a helper function, providing more flexibility for upcoming mapped-ram support. Signed-off-by: Jim Fehlig <jfehlig@suse.com> --- src/qemu/qemu_saveimage.c | 65 +++++++++++++++++++++++++++------------ 1 file changed, 45 insertions(+), 20 deletions(-) diff --git a/src/qemu/qemu_saveimage.c b/src/qemu/qemu_saveimage.c index 29b4e39879..e3f6a0ad0f 100644 --- a/src/qemu/qemu_saveimage.c +++ b/src/qemu/qemu_saveimage.c @@ -425,6 +425,50 @@ qemuSaveImageDecompressionStop(virCommand *cmd, } +static int +qemuSaveImageCreateFd(virQEMUDriver *driver, + virDomainObj *vm, + const char *path, + virFileWrapperFd *wrapperFd, + bool *needUnlink, + unsigned int flags) +{ + g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver); + int ret = -1; + VIR_AUTOCLOSE fd = -1; + int directFlag = 0; + unsigned int wrapperFlags = VIR_FILE_WRAPPER_NON_BLOCKING; + + if (flags & VIR_DOMAIN_SAVE_BYPASS_CACHE) { + wrapperFlags |= VIR_FILE_WRAPPER_BYPASS_CACHE; + directFlag = virFileDirectFdFlag(); + if (directFlag < 0) { + virReportError(VIR_ERR_OPERATION_FAILED, "%s", + _("bypass cache unsupported by this system")); + return -1; + } + } + + fd = virQEMUFileOpenAs(cfg->user, cfg->group, false, path, + O_WRONLY | O_TRUNC | O_CREAT | directFlag, + needUnlink); + + if (fd < 0) + return -1; + + if (qemuSecuritySetImageFDLabel(driver->securityManager, vm->def, fd) < 0) + return -1; + + if (!(wrapperFd = virFileWrapperFdNew(&fd, path, wrapperFlags))) + return -1; + + ret = fd; + fd = -1; + + return ret; +} + + /* Helper function to execute a migration to file with a correct save header * the caller needs to make sure that the processors are stopped and do all other * actions besides saving memory */ @@ -441,33 +485,14 @@ qemuSaveImageCreate(virQEMUDriver *driver, bool needUnlink = false; int ret = -1; int fd = -1; - int directFlag = 0; virFileWrapperFd *wrapperFd = NULL; - unsigned int wrapperFlags = 
VIR_FILE_WRAPPER_NON_BLOCKING; /* Obtain the file handle. */ - if ((flags & VIR_DOMAIN_SAVE_BYPASS_CACHE)) { - wrapperFlags |= VIR_FILE_WRAPPER_BYPASS_CACHE; - directFlag = virFileDirectFdFlag(); - if (directFlag < 0) { - virReportError(VIR_ERR_OPERATION_FAILED, "%s", - _("bypass cache unsupported by this system")); - goto cleanup; - } - } + fd = qemuSaveImageCreateFd(driver, vm, path, wrapperFd, &needUnlink, flags); - fd = virQEMUFileOpenAs(cfg->user, cfg->group, false, path, - O_WRONLY | O_TRUNC | O_CREAT | directFlag, - &needUnlink); if (fd < 0) goto cleanup; - if (qemuSecuritySetImageFDLabel(driver->securityManager, vm->def, fd) < 0) - goto cleanup; - - if (!(wrapperFd = virFileWrapperFdNew(&fd, path, wrapperFlags))) - goto cleanup; - if (virQEMUSaveDataWrite(data, fd, path) < 0) goto cleanup; -- 2.43.0

On Wed, Mar 05, 2025 at 03:48:17PM -0700, Jim Fehlig via Devel wrote:
Move the code in qemuSaveImageCreate that opens, labels, and wraps the save image fd to a helper function, providing more flexibility for upcoming mapped-ram support.
Signed-off-by: Jim Fehlig <jfehlig@suse.com> --- src/qemu/qemu_saveimage.c | 65 +++++++++++++++++++++++++++------------ 1 file changed, 45 insertions(+), 20 deletions(-)
diff --git a/src/qemu/qemu_saveimage.c b/src/qemu/qemu_saveimage.c index 29b4e39879..e3f6a0ad0f 100644 --- a/src/qemu/qemu_saveimage.c +++ b/src/qemu/qemu_saveimage.c @@ -425,6 +425,50 @@ qemuSaveImageDecompressionStop(virCommand *cmd, }
+static int +qemuSaveImageCreateFd(virQEMUDriver *driver, + virDomainObj *vm, + const char *path, + virFileWrapperFd *wrapperFd,
Doesn't this need to be virFileWrapperFd **, otherwise...
+ bool *needUnlink, + unsigned int flags) +{ + g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver); + int ret = -1; + VIR_AUTOCLOSE fd = -1; + int directFlag = 0; + unsigned int wrapperFlags = VIR_FILE_WRAPPER_NON_BLOCKING; + + if (flags & VIR_DOMAIN_SAVE_BYPASS_CACHE) { + wrapperFlags |= VIR_FILE_WRAPPER_BYPASS_CACHE; + directFlag = virFileDirectFdFlag(); + if (directFlag < 0) { + virReportError(VIR_ERR_OPERATION_FAILED, "%s", + _("bypass cache unsupported by this system")); + return -1; + } + } + + fd = virQEMUFileOpenAs(cfg->user, cfg->group, false, path, + O_WRONLY | O_TRUNC | O_CREAT | directFlag, + needUnlink); + + if (fd < 0) + return -1; + + if (qemuSecuritySetImageFDLabel(driver->securityManager, vm->def, fd) < 0) + return -1; + + if (!(wrapperFd = virFileWrapperFdNew(&fd, path, wrapperFlags))) + return -1;
....this assignment won't be visible to the caller
+ + ret = fd; + fd = -1; + + return ret; +} + + /* Helper function to execute a migration to file with a correct save header * the caller needs to make sure that the processors are stopped and do all other * actions besides saving memory */ @@ -441,33 +485,14 @@ qemuSaveImageCreate(virQEMUDriver *driver, bool needUnlink = false; int ret = -1; int fd = -1; - int directFlag = 0; virFileWrapperFd *wrapperFd = NULL; - unsigned int wrapperFlags = VIR_FILE_WRAPPER_NON_BLOCKING;
...
+ fd = qemuSaveImageCreateFd(driver, vm, path, wrapperFd, &needUnlink, flags);
..This would need to be &wrapperFd
- fd = virQEMUFileOpenAs(cfg->user, cfg->group, false, path, - O_WRONLY | O_TRUNC | O_CREAT | directFlag, - &needUnlink); if (fd < 0) goto cleanup;
- if (qemuSecuritySetImageFDLabel(driver->securityManager, vm->def, fd) < 0) - goto cleanup; - - if (!(wrapperFd = virFileWrapperFdNew(&fd, path, wrapperFlags))) - goto cleanup; - if (virQEMUSaveDataWrite(data, fd, path) < 0) goto cleanup;
-- 2.43.0
With regards, Daniel -- |: https://berrange.com -o- https://www.flickr.com/photos/dberrange :| |: https://libvirt.org -o- https://fstop138.berrange.com :| |: https://entangle-photo.org -o- https://www.instagram.com/dberrange :|

On 3/19/25 06:49, Daniel P. Berrangé wrote:
On Wed, Mar 05, 2025 at 03:48:17PM -0700, Jim Fehlig via Devel wrote:
Move the code in qemuSaveImageCreate that opens, labels, and wraps the save image fd to a helper function, providing more flexibility for upcoming mapped-ram support.
Signed-off-by: Jim Fehlig <jfehlig@suse.com> --- src/qemu/qemu_saveimage.c | 65 +++++++++++++++++++++++++++------------ 1 file changed, 45 insertions(+), 20 deletions(-)
diff --git a/src/qemu/qemu_saveimage.c b/src/qemu/qemu_saveimage.c index 29b4e39879..e3f6a0ad0f 100644 --- a/src/qemu/qemu_saveimage.c +++ b/src/qemu/qemu_saveimage.c @@ -425,6 +425,50 @@ qemuSaveImageDecompressionStop(virCommand *cmd, }
+static int +qemuSaveImageCreateFd(virQEMUDriver *driver, + virDomainObj *vm, + const char *path, + virFileWrapperFd *wrapperFd,
Doesn't this need to be virFileWrapperFd **, otherwise...
+ bool *needUnlink, + unsigned int flags) +{ + g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver); + int ret = -1; + VIR_AUTOCLOSE fd = -1; + int directFlag = 0; + unsigned int wrapperFlags = VIR_FILE_WRAPPER_NON_BLOCKING; + + if (flags & VIR_DOMAIN_SAVE_BYPASS_CACHE) { + wrapperFlags |= VIR_FILE_WRAPPER_BYPASS_CACHE; + directFlag = virFileDirectFdFlag(); + if (directFlag < 0) { + virReportError(VIR_ERR_OPERATION_FAILED, "%s", + _("bypass cache unsupported by this system")); + return -1; + } + } + + fd = virQEMUFileOpenAs(cfg->user, cfg->group, false, path, + O_WRONLY | O_TRUNC | O_CREAT | directFlag, + needUnlink); + + if (fd < 0) + return -1; + + if (qemuSecuritySetImageFDLabel(driver->securityManager, vm->def, fd) < 0) + return -1; + + if (!(wrapperFd = virFileWrapperFdNew(&fd, path, wrapperFlags))) + return -1;
....this assignment won't be visible to the caller
Oops, which makes qemuDomainFileWrapperFDClose a NOP in all cases. I've applied the below diff locally. Any need to respin this series? Regards, Jim diff --git a/src/qemu/qemu_saveimage.c b/src/qemu/qemu_saveimage.c index e3f6a0ad0f..7ab44edcdc 100644 --- a/src/qemu/qemu_saveimage.c +++ b/src/qemu/qemu_saveimage.c @@ -429,7 +429,7 @@ static int qemuSaveImageCreateFd(virQEMUDriver *driver, virDomainObj *vm, const char *path, - virFileWrapperFd *wrapperFd, + virFileWrapperFd **wrapperFd, bool *needUnlink, unsigned int flags) { @@ -459,7 +459,7 @@ qemuSaveImageCreateFd(virQEMUDriver *driver, if (qemuSecuritySetImageFDLabel(driver->securityManager, vm->def, fd) < 0) return -1; - if (!(wrapperFd = virFileWrapperFdNew(&fd, path, wrapperFlags))) + if (!(*wrapperFd = virFileWrapperFdNew(&fd, path, wrapperFlags))) return -1; ret = fd; @@ -488,7 +488,7 @@ qemuSaveImageCreate(virQEMUDriver *driver, virFileWrapperFd *wrapperFd = NULL; /* Obtain the file handle. */ - fd = qemuSaveImageCreateFd(driver, vm, path, wrapperFd, &needUnlink, flags); + fd = qemuSaveImageCreateFd(driver, vm, path, &wrapperFd, &needUnlink, flags); if (fd < 0) goto cleanup;

On Wed, Mar 19, 2025 at 05:09:01PM -0600, Jim Fehlig wrote:
On 3/19/25 06:49, Daniel P. Berrangé wrote:
On Wed, Mar 05, 2025 at 03:48:17PM -0700, Jim Fehlig via Devel wrote:
Move the code in qemuSaveImageCreate that opens, labels, and wraps the save image fd to a helper function, providing more flexibility for upcoming mapped-ram support.
Signed-off-by: Jim Fehlig <jfehlig@suse.com> --- src/qemu/qemu_saveimage.c | 65 +++++++++++++++++++++++++++------------ 1 file changed, 45 insertions(+), 20 deletions(-)
diff --git a/src/qemu/qemu_saveimage.c b/src/qemu/qemu_saveimage.c index 29b4e39879..e3f6a0ad0f 100644 --- a/src/qemu/qemu_saveimage.c +++ b/src/qemu/qemu_saveimage.c @@ -425,6 +425,50 @@ qemuSaveImageDecompressionStop(virCommand *cmd, } +static int +qemuSaveImageCreateFd(virQEMUDriver *driver, + virDomainObj *vm, + const char *path, + virFileWrapperFd *wrapperFd,
Doesn't this need to be virFileWrapperFd **, otherwise...
+ bool *needUnlink, + unsigned int flags) +{ + g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver); + int ret = -1; + VIR_AUTOCLOSE fd = -1; + int directFlag = 0; + unsigned int wrapperFlags = VIR_FILE_WRAPPER_NON_BLOCKING; + + if (flags & VIR_DOMAIN_SAVE_BYPASS_CACHE) { + wrapperFlags |= VIR_FILE_WRAPPER_BYPASS_CACHE; + directFlag = virFileDirectFdFlag(); + if (directFlag < 0) { + virReportError(VIR_ERR_OPERATION_FAILED, "%s", + _("bypass cache unsupported by this system")); + return -1; + } + } + + fd = virQEMUFileOpenAs(cfg->user, cfg->group, false, path, + O_WRONLY | O_TRUNC | O_CREAT | directFlag, + needUnlink); + + if (fd < 0) + return -1; + + if (qemuSecuritySetImageFDLabel(driver->securityManager, vm->def, fd) < 0) + return -1; + + if (!(wrapperFd = virFileWrapperFdNew(&fd, path, wrapperFlags))) + return -1;
....this assignment won't be visible to the caller
Oops, which makes qemuDomainFileWrapperFDClose a NOP in all cases. I've applied the below diff locally. Any need to respin this series?
..snip.. Reviewed-by: Daniel P. Berrangé <berrange@redhat.com> With regards, Daniel -- |: https://berrange.com -o- https://www.flickr.com/photos/dberrange :| |: https://libvirt.org -o- https://fstop138.berrange.com :| |: https://entangle-photo.org -o- https://www.instagram.com/dberrange :|

Introduce support for QEMU's new mapped-ram stream format [1]. mapped-ram can be enabled by setting the 'save_image_format' setting in qemu.conf to 'sparse'. To use mapped-ram with QEMU: - The 'mapped-ram' migration capability must be set to true - The 'multifd' migration capability must be set to true and the 'multifd-channels' migration parameter must set to 1 - QEMU must be provided an fdset containing the migration fd - The 'migrate' qmp command is invoked with a URI referencing the fdset and an offset where to start reading or writing the data stream, e.g. {"execute":"migrate", "arguments":{"detach":true,"resume":false, "uri":"file:/dev/fdset/0,offset=0x11921"}} The mapped-ram stream, in conjunction with direct IO and multifd support provided by subsequent patches, can significantly improve the time required to save VM memory state. The following tables compare mapped-ram with the existing, sequential save stream. In all cases, the save and restore operations are to/from a block device comprised of two NVMe disks in RAID0 configuration with xfs (~8600MiB/s). The values in the 'save time' and 'restore time' columns were scraped from the 'real' time reported by time(1). The 'Size' and 'Blocks' columns were provided by the corresponding outputs of stat(1). 
VM: 32G RAM, 1 vcpu, idle (shortly after boot) | save | restore | | time | time | Size | Blocks -----------------------+---------+---------+--------------+-------- legacy | 6.193s | 4.399s | 985744812 | 1925288 -----------------------+---------+---------+--------------+-------- mapped-ram | 5.109s | 1.176s | 34368554354 | 1774472 -----------------------+---------+---------+--------------+-------- legacy + direct IO | 5.725s | 4.512s | 985765251 | 1925328 -----------------------+---------+---------+--------------+-------- mapped-ram + direct IO | 4.627s | 1.490s | 34368554354 | 1774304 -----------------------+---------+---------+--------------+-------- mapped-ram + direct IO | | | | + multifd-channels=8 | 4.421s | 0.845s | 34368554318 | 1774312 ------------------------------------------------------------------- VM: 32G RAM, 30G dirty, 1 vcpu in tight loop dirtying memory | save | restore | | time | time | Size | Blocks -----------------------+---------+---------+--------------+--------- legacy | 25.800s | 14.332s | 33154309983 | 64754512 -----------------------+---------+---------+--------------+--------- mapped-ram | 18.742s | 15.027s | 34368559228 | 64617160 -----------------------+---------+---------+--------------+--------- legacy + direct IO | 13.115s | 18.050s | 33154310496 | 64754520 -----------------------+---------+---------+--------------+--------- mapped-ram + direct IO | 13.623s | 15.959s | 34368557392 | 64662040 -----------------------+-------- +---------+--------------+--------- mapped-ram + direct IO | | | | + multifd-channels=8 | 6.994s | 6.470s | 34368554980 | 64665776 -------------------------------------------------------------------- As can be seen from the tables, one caveat of mapped-ram is the logical file size of a saved image is basically equivalent to the VM memory size. Note however that mapped-ram typically uses fewer blocks on disk, hence the name 'sparse' for 'save_image_format'. 
Also note the mapped-ram stream is incompatible with the existing stream format, hence mapped-ram cannot be used to restore an image saved with the existing format and vice versa. [1] https://gitlab.com/qemu-project/qemu/-/blob/master/docs/devel/migration/mapp... Signed-off-by: Jim Fehlig <jfehlig@suse.com> --- src/qemu/qemu_driver.c | 23 +++++- src/qemu/qemu_migration.c | 149 ++++++++++++++++++++++++++------------ src/qemu/qemu_migration.h | 4 +- src/qemu/qemu_monitor.c | 34 +++++++++ src/qemu/qemu_monitor.h | 4 + src/qemu/qemu_saveimage.c | 31 +++++--- src/qemu/qemu_saveimage.h | 1 + src/qemu/qemu_snapshot.c | 7 +- 8 files changed, 191 insertions(+), 62 deletions(-) diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c index 53bdae402a..2b2dda9d26 100644 --- a/src/qemu/qemu_driver.c +++ b/src/qemu/qemu_driver.c @@ -2599,6 +2599,7 @@ qemuDomainSaveInternal(virQEMUDriver *driver, qemuDomainObjPrivate *priv = vm->privateData; virQEMUSaveData *data = NULL; g_autoptr(qemuDomainSaveCookie) cookie = NULL; + g_autoptr(qemuMigrationParams) saveParams = NULL; if (virDomainObjBeginAsyncJob(vm, VIR_ASYNC_JOB_SAVE, VIR_DOMAIN_JOB_OPERATION_SAVE, flags) < 0) @@ -2607,6 +2608,14 @@ qemuDomainSaveInternal(virQEMUDriver *driver, if (!qemuMigrationSrcIsAllowed(vm, false, VIR_ASYNC_JOB_SAVE, 0)) goto endjob; + if (format == QEMU_SAVE_FORMAT_SPARSE && + !qemuMigrationCapsGet(vm, QEMU_MIGRATION_CAP_MAPPED_RAM)) { + virReportError(VIR_ERR_OPERATION_UNSUPPORTED, + _("save image format %1$s is not supported by this QEMU binary"), + qemuSaveFormatTypeToString(format)); + goto endjob; + } + if (!virDomainObjIsActive(vm)) { virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("guest unexpectedly quit")); @@ -2670,8 +2679,11 @@ qemuDomainSaveInternal(virQEMUDriver *driver, goto endjob; xml = NULL; + if (!(saveParams = qemuMigrationParamsForSave(format == QEMU_SAVE_FORMAT_SPARSE))) + goto endjob; + ret = qemuSaveImageCreate(driver, vm, path, data, compressor, - flags, VIR_ASYNC_JOB_SAVE); + 
saveParams, flags, VIR_ASYNC_JOB_SAVE); if (ret < 0) goto endjob; @@ -3103,6 +3115,8 @@ doCoreDump(virQEMUDriver *driver, memory_dump_format) < 0) goto cleanup; } else { + g_autoptr(qemuMigrationParams) dump_params = NULL; + if (dumpformat != VIR_DOMAIN_CORE_DUMP_FORMAT_RAW) { virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s", _("kdump-compressed format is only supported with memory-only dump")); @@ -3112,8 +3126,11 @@ doCoreDump(virQEMUDriver *driver, if (!qemuMigrationSrcIsAllowed(vm, false, VIR_ASYNC_JOB_DUMP, 0)) goto cleanup; - if (qemuMigrationSrcToFile(driver, vm, fd, compressor, - VIR_ASYNC_JOB_DUMP) < 0) + if (!(dump_params = qemuMigrationParamsNew())) + goto cleanup; + + if (qemuMigrationSrcToFile(driver, vm, &fd, compressor, + dump_params, dump_flags, VIR_ASYNC_JOB_DUMP) < 0) goto cleanup; } diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c index 8753fb5358..f692986056 100644 --- a/src/qemu/qemu_migration.c +++ b/src/qemu/qemu_migration.c @@ -7118,46 +7118,17 @@ qemuMigrationProcessUnattended(virQEMUDriver *driver, } -/* Helper function called while vm is active. */ -int -qemuMigrationSrcToFile(virQEMUDriver *driver, virDomainObj *vm, - int fd, - virCommand *compressor, - virDomainAsyncJob asyncJob) +static int +qemuMigrationSrcToLegacyFile(virQEMUDriver *driver, + virDomainObj *vm, + int fd, + virCommand *compressor, + virDomainAsyncJob asyncJob) { qemuDomainObjPrivate *priv = vm->privateData; - int rc; int ret = -1; int pipeFD[2] = { -1, -1 }; - unsigned long saveMigBandwidth = priv->migMaxBandwidth; char *errbuf = NULL; - virErrorPtr orig_err = NULL; - g_autoptr(qemuMigrationParams) migParams = NULL; - - if (qemuMigrationSetDBusVMState(driver, vm) < 0) - return -1; - - /* Increase migration bandwidth to unlimited since target is a file. - * Failure to change migration speed is not fatal. 
*/ - if (!(migParams = qemuMigrationParamsForSave(false))) - return -1; - - if (qemuMigrationParamsSetULL(migParams, - QEMU_MIGRATION_PARAM_MAX_BANDWIDTH, - QEMU_DOMAIN_MIG_BANDWIDTH_MAX * 1024 * 1024) < 0) - return -1; - - if (qemuMigrationParamsApply(vm, asyncJob, migParams, 0) < 0) - return -1; - - priv->migMaxBandwidth = QEMU_DOMAIN_MIG_BANDWIDTH_MAX; - - if (!virDomainObjIsActive(vm)) { - virReportError(VIR_ERR_INTERNAL_ERROR, "%s", - _("guest unexpectedly quit")); - /* nothing to tear down */ - return -1; - } if (compressor && virPipe(pipeFD) < 0) return -1; @@ -7174,7 +7145,7 @@ qemuMigrationSrcToFile(virQEMUDriver *driver, virDomainObj *vm, goto cleanup; if (!compressor) { - rc = qemuMonitorMigrateToFd(priv->mon, 0, fd); + ret = qemuMonitorMigrateToFd(priv->mon, 0, fd); } else { virCommandSetInputFD(compressor, pipeFD[0]); virCommandSetOutputFD(compressor, &fd); @@ -7190,12 +7161,98 @@ qemuMigrationSrcToFile(virQEMUDriver *driver, virDomainObj *vm, qemuDomainObjExitMonitor(vm); goto cleanup; } - rc = qemuMonitorMigrateToFd(priv->mon, 0, pipeFD[1]); + ret = qemuMonitorMigrateToFd(priv->mon, 0, pipeFD[1]); if (VIR_CLOSE(pipeFD[0]) < 0 || VIR_CLOSE(pipeFD[1]) < 0) VIR_WARN("failed to close intermediate pipe"); } qemuDomainObjExitMonitor(vm); + + cleanup: + VIR_FORCE_CLOSE(pipeFD[0]); + VIR_FORCE_CLOSE(pipeFD[1]); + + if (errbuf) { + VIR_DEBUG("Compression binary stderr: %s", NULLSTR(errbuf)); + VIR_FREE(errbuf); + } + + return ret; +} + + +static int +qemuMigrationSrcToSparseFile(virQEMUDriver *driver, + virDomainObj *vm, + int *fd, + unsigned int flags, + virDomainAsyncJob asyncJob) +{ + int ret; + + /* mapped-ram does not support directIO */ + if ((flags & VIR_DOMAIN_SAVE_BYPASS_CACHE)) { + virReportError(VIR_ERR_OPERATION_FAILED, "%s", + _("bypass cache unsupported by this system")); + return -1; + } + + if (qemuSecuritySetImageFDLabel(driver->securityManager, vm->def, *fd) < 0) + return -1; + + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) + return 
-1; + + ret = qemuMonitorMigrateToFdSet(vm, 0, fd); + qemuDomainObjExitMonitor(vm); + return ret; +} + + +/* Helper function called while vm is active. */ +int +qemuMigrationSrcToFile(virQEMUDriver *driver, virDomainObj *vm, + int *fd, + virCommand *compressor, + qemuMigrationParams *migParams, + unsigned int flags, + virDomainAsyncJob asyncJob) +{ + qemuDomainObjPrivate *priv = vm->privateData; + int rc; + int ret = -1; + unsigned long saveMigBandwidth = priv->migMaxBandwidth; + virErrorPtr orig_err = NULL; + + if (qemuMigrationSetDBusVMState(driver, vm) < 0) + return -1; + + /* Increase migration bandwidth to unlimited since target is a file. + * Failure to change migration speed is not fatal. */ + if (migParams && + qemuMigrationParamsSetULL(migParams, + QEMU_MIGRATION_PARAM_MAX_BANDWIDTH, + QEMU_DOMAIN_MIG_BANDWIDTH_MAX * 1024 * 1024) < 0) + return -1; + + if (qemuMigrationParamsApply(vm, asyncJob, migParams, 0) < 0) + return -1; + + priv->migMaxBandwidth = QEMU_DOMAIN_MIG_BANDWIDTH_MAX; + + if (!virDomainObjIsActive(vm)) { + virReportError(VIR_ERR_INTERNAL_ERROR, "%s", + _("guest unexpectedly quit")); + /* nothing to tear down */ + return -1; + } + + if (migParams && + qemuMigrationParamsCapEnabled(migParams, QEMU_MIGRATION_CAP_MAPPED_RAM)) + rc = qemuMigrationSrcToSparseFile(driver, vm, fd, flags, asyncJob); + else + rc = qemuMigrationSrcToLegacyFile(driver, vm, *fd, compressor, asyncJob); + if (rc < 0) goto cleanup; @@ -7221,8 +7278,17 @@ qemuMigrationSrcToFile(virQEMUDriver *driver, virDomainObj *vm, if (ret < 0 && !orig_err) virErrorPreserveLast(&orig_err); - /* Restore max migration bandwidth */ + /* Remove fdset passed to qemu and restore max migration bandwidth */ if (qemuDomainObjIsActive(vm)) { + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) == 0) { + qemuFDPass *fdPass = + qemuFDPassNewFromMonitor("libvirt-outgoing-migrate", priv->mon); + + if (fdPass) + qemuFDPassTransferMonitorRollback(fdPass, priv->mon); + qemuDomainObjExitMonitor(vm); + } + if 
(qemuMigrationParamsSetULL(migParams, QEMU_MIGRATION_PARAM_MAX_BANDWIDTH, saveMigBandwidth * 1024 * 1024) == 0) @@ -7231,13 +7297,6 @@ qemuMigrationSrcToFile(virQEMUDriver *driver, virDomainObj *vm, priv->migMaxBandwidth = saveMigBandwidth; } - VIR_FORCE_CLOSE(pipeFD[0]); - VIR_FORCE_CLOSE(pipeFD[1]); - if (errbuf) { - VIR_DEBUG("Compression binary stderr: %s", NULLSTR(errbuf)); - VIR_FREE(errbuf); - } - virErrorRestore(&orig_err); return ret; diff --git a/src/qemu/qemu_migration.h b/src/qemu/qemu_migration.h index efe1b9e88a..9fa007b949 100644 --- a/src/qemu/qemu_migration.h +++ b/src/qemu/qemu_migration.h @@ -238,8 +238,10 @@ qemuMigrationSrcIsAllowed(virDomainObj *vm, int qemuMigrationSrcToFile(virQEMUDriver *driver, virDomainObj *vm, - int fd, + int *fd, virCommand *compressor, + qemuMigrationParams *migParams, + unsigned int flags, virDomainAsyncJob asyncJob) ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2) G_GNUC_WARN_UNUSED_RESULT; diff --git a/src/qemu/qemu_monitor.c b/src/qemu/qemu_monitor.c index c069d17265..8d244c6f4a 100644 --- a/src/qemu/qemu_monitor.c +++ b/src/qemu/qemu_monitor.c @@ -2232,6 +2232,40 @@ qemuMonitorMigrateToFd(qemuMonitor *mon, } +int +qemuMonitorMigrateToFdSet(virDomainObj *vm, + unsigned int flags, + int *fd) +{ + qemuDomainObjPrivate *priv = vm->privateData; + qemuMonitor *mon = priv->mon; + off_t offset; + g_autoptr(qemuFDPass) fdPassMigrate = NULL; + g_autofree char *uri = NULL; + int ret; + + VIR_DEBUG("fd=%d flags=0x%x", *fd, flags); + + QEMU_CHECK_MONITOR(mon); + + if ((offset = lseek(*fd, 0, SEEK_CUR)) == -1) { + virReportSystemError(errno, + "%s", _("failed to seek on file descriptor")); + return -1; + } + + fdPassMigrate = qemuFDPassNew("libvirt-outgoing-migrate", priv); + qemuFDPassAddFD(fdPassMigrate, fd, "-fd"); + qemuFDPassTransferMonitor(fdPassMigrate, mon); + + uri = g_strdup_printf("file:%s,offset=%#lx", + qemuFDPassGetPath(fdPassMigrate), offset); + ret = qemuMonitorJSONMigrate(mon, flags, uri); + + return ret; +} + + int 
qemuMonitorMigrateToHost(qemuMonitor *mon, unsigned int flags, diff --git a/src/qemu/qemu_monitor.h b/src/qemu/qemu_monitor.h index f7b9263b64..4022bc3c7b 100644 --- a/src/qemu/qemu_monitor.h +++ b/src/qemu/qemu_monitor.h @@ -859,6 +859,10 @@ int qemuMonitorMigrateToFd(qemuMonitor *mon, unsigned int flags, int fd); +int qemuMonitorMigrateToFdSet(virDomainObj *vm, + unsigned int flags, + int *fd); + int qemuMonitorMigrateToHost(qemuMonitor *mon, unsigned int flags, const char *protocol, diff --git a/src/qemu/qemu_saveimage.c b/src/qemu/qemu_saveimage.c index e3f6a0ad0f..cad982f450 100644 --- a/src/qemu/qemu_saveimage.c +++ b/src/qemu/qemu_saveimage.c @@ -430,6 +430,7 @@ qemuSaveImageCreateFd(virQEMUDriver *driver, virDomainObj *vm, const char *path, virFileWrapperFd *wrapperFd, + bool sparse, bool *needUnlink, unsigned int flags) { @@ -439,7 +440,7 @@ qemuSaveImageCreateFd(virQEMUDriver *driver, int directFlag = 0; unsigned int wrapperFlags = VIR_FILE_WRAPPER_NON_BLOCKING; - if (flags & VIR_DOMAIN_SAVE_BYPASS_CACHE) { + if (!sparse && flags & VIR_DOMAIN_SAVE_BYPASS_CACHE) { wrapperFlags |= VIR_FILE_WRAPPER_BYPASS_CACHE; directFlag = virFileDirectFdFlag(); if (directFlag < 0) { @@ -459,7 +460,7 @@ qemuSaveImageCreateFd(virQEMUDriver *driver, if (qemuSecuritySetImageFDLabel(driver->securityManager, vm->def, fd) < 0) return -1; - if (!(wrapperFd = virFileWrapperFdNew(&fd, path, wrapperFlags))) + if (!sparse && !(wrapperFd = virFileWrapperFdNew(&fd, path, wrapperFlags))) return -1; ret = fd; @@ -478,6 +479,7 @@ qemuSaveImageCreate(virQEMUDriver *driver, const char *path, virQEMUSaveData *data, virCommand *compressor, + qemuMigrationParams *saveParams, unsigned int flags, virDomainAsyncJob asyncJob) { @@ -486,9 +488,10 @@ qemuSaveImageCreate(virQEMUDriver *driver, int ret = -1; int fd = -1; virFileWrapperFd *wrapperFd = NULL; + bool sparse = data->header.format == QEMU_SAVE_FORMAT_SPARSE; /* Obtain the file handle. 
*/ - fd = qemuSaveImageCreateFd(driver, vm, path, wrapperFd, &needUnlink, flags); + fd = qemuSaveImageCreateFd(driver, vm, path, wrapperFd, sparse, &needUnlink, flags); if (fd < 0) goto cleanup; @@ -497,7 +500,7 @@ qemuSaveImageCreate(virQEMUDriver *driver, goto cleanup; /* Perform the migration */ - if (qemuMigrationSrcToFile(driver, vm, fd, compressor, asyncJob) < 0) + if (qemuMigrationSrcToFile(driver, vm, &fd, compressor, saveParams, flags, asyncJob) < 0) goto cleanup; /* Touch up file header to mark image complete. */ @@ -505,14 +508,18 @@ qemuSaveImageCreate(virQEMUDriver *driver, /* Reopen the file to touch up the header, since we aren't set * up to seek backwards on wrapperFd. The reopened fd will * trigger a single page of file system cache pollution, but - * that's acceptable. */ - if (VIR_CLOSE(fd) < 0) { - virReportSystemError(errno, _("unable to close %1$s"), path); - goto cleanup; - } + * that's acceptable. + * If using mapped-ram, the fd was passed to qemu, so no need + * to close it. 
*/ + if (!sparse) { + if (VIR_CLOSE(fd) < 0) { + virReportSystemError(errno, _("unable to close %1$s"), path); + goto cleanup; + } - if (qemuDomainFileWrapperFDClose(vm, wrapperFd) < 0) - goto cleanup; + if (qemuDomainFileWrapperFDClose(vm, wrapperFd) < 0) + goto cleanup; + } if ((fd = qemuDomainOpenFile(cfg, vm->def, path, O_WRONLY, NULL)) < 0 || virQEMUSaveDataFinish(data, &fd, path) < 0) @@ -552,7 +559,7 @@ qemuSaveImageGetCompressionProgram(int format, *compressor = NULL; - if (format == QEMU_SAVE_FORMAT_RAW) + if (format == QEMU_SAVE_FORMAT_RAW || format == QEMU_SAVE_FORMAT_SPARSE) return 0; if (!(prog = virFindFileInPath(imageFormat))) { diff --git a/src/qemu/qemu_saveimage.h b/src/qemu/qemu_saveimage.h index 0411f3140a..495d773ea5 100644 --- a/src/qemu/qemu_saveimage.h +++ b/src/qemu/qemu_saveimage.h @@ -136,6 +136,7 @@ qemuSaveImageCreate(virQEMUDriver *driver, const char *path, virQEMUSaveData *data, virCommand *compressor, + qemuMigrationParams *saveParams, unsigned int flags, virDomainAsyncJob asyncJob); diff --git a/src/qemu/qemu_snapshot.c b/src/qemu/qemu_snapshot.c index 891e67e98b..fa814e051c 100644 --- a/src/qemu/qemu_snapshot.c +++ b/src/qemu/qemu_snapshot.c @@ -1661,6 +1661,8 @@ qemuSnapshotCreateActiveExternal(virQEMUDriver *driver, /* do the memory snapshot if necessary */ if (memory) { + g_autoptr(qemuMigrationParams) snap_params = NULL; + /* check if migration is possible */ if (!qemuMigrationSrcIsAllowed(vm, false, VIR_ASYNC_JOB_SNAPSHOT, 0)) goto cleanup; @@ -1691,8 +1693,11 @@ qemuSnapshotCreateActiveExternal(virQEMUDriver *driver, memory_existing = virFileExists(snapdef->memorysnapshotfile); + if (!(snap_params = qemuMigrationParamsNew())) + goto cleanup; + if ((ret = qemuSaveImageCreate(driver, vm, snapdef->memorysnapshotfile, - data, compressor, 0, + data, compressor, snap_params, 0, VIR_ASYNC_JOB_SNAPSHOT)) < 0) goto cleanup; -- 2.43.0

On Wed, Mar 05, 2025 at 03:48:18PM -0700, Jim Fehlig via Devel wrote:
Introduce support for QEMU's new mapped-ram stream format [1]. mapped-ram can be enabled by setting the 'save_image_format' setting in qemu.conf to 'sparse'.
To use mapped-ram with QEMU: - The 'mapped-ram' migration capability must be set to true - The 'multifd' migration capability must be set to true and the 'multifd-channels' migration parameter must be set to 1 - QEMU must be provided an fdset containing the migration fd - The 'migrate' qmp command is invoked with a URI referencing the fdset and an offset where to start reading or writing the data stream, e.g.
{"execute":"migrate", "arguments":{"detach":true,"resume":false, "uri":"file:/dev/fdset/0,offset=0x11921"}}
The mapped-ram stream, in conjunction with direct IO and multifd support provided by subsequent patches, can significantly improve the time required to save VM memory state. The following tables compare mapped-ram with the existing, sequential save stream. In all cases, the save and restore operations are to/from a block device comprised of two NVMe disks in RAID0 configuration with xfs (~8600MiB/s). The values in the 'save time' and 'restore time' columns were scraped from the 'real' time reported by time(1). The 'Size' and 'Blocks' columns were provided by the corresponding outputs of stat(1).
VM: 32G RAM, 1 vcpu, idle (shortly after boot)
| save | restore | | time | time | Size | Blocks -----------------------+---------+---------+--------------+-------- legacy | 6.193s | 4.399s | 985744812 | 1925288 -----------------------+---------+---------+--------------+-------- mapped-ram | 5.109s | 1.176s | 34368554354 | 1774472 -----------------------+---------+---------+--------------+-------- legacy + direct IO | 5.725s | 4.512s | 985765251 | 1925328 -----------------------+---------+---------+--------------+-------- mapped-ram + direct IO | 4.627s | 1.490s | 34368554354 | 1774304 -----------------------+---------+---------+--------------+-------- mapped-ram + direct IO | | | | + multifd-channels=8 | 4.421s | 0.845s | 34368554318 | 1774312 -------------------------------------------------------------------
VM: 32G RAM, 30G dirty, 1 vcpu in tight loop dirtying memory
| save | restore | | time | time | Size | Blocks -----------------------+---------+---------+--------------+--------- legacy | 25.800s | 14.332s | 33154309983 | 64754512 -----------------------+---------+---------+--------------+--------- mapped-ram | 18.742s | 15.027s | 34368559228 | 64617160 -----------------------+---------+---------+--------------+--------- legacy + direct IO | 13.115s | 18.050s | 33154310496 | 64754520 -----------------------+---------+---------+--------------+--------- mapped-ram + direct IO | 13.623s | 15.959s | 34368557392 | 64662040 -----------------------+---------+---------+--------------+--------- mapped-ram + direct IO | | | | + multifd-channels=8 | 6.994s | 6.470s | 34368554980 | 64665776 --------------------------------------------------------------------
As can be seen from the tables, one caveat of mapped-ram is the logical file size of a saved image is basically equivalent to the VM memory size. Note however that mapped-ram typically uses fewer blocks on disk, hence the name 'sparse' for 'save_image_format'.
Also note the mapped-ram stream is incompatible with the existing stream format, hence mapped-ram cannot be used to restore an image saved with the existing format and vice versa.
[1] https://gitlab.com/qemu-project/qemu/-/blob/master/docs/devel/migration/mapp...
Signed-off-by: Jim Fehlig <jfehlig@suse.com> --- src/qemu/qemu_driver.c | 23 +++++- src/qemu/qemu_migration.c | 149 ++++++++++++++++++++++++++------------ src/qemu/qemu_migration.h | 4 +- src/qemu/qemu_monitor.c | 34 +++++++++ src/qemu/qemu_monitor.h | 4 + src/qemu/qemu_saveimage.c | 31 +++++--- src/qemu/qemu_saveimage.h | 1 + src/qemu/qemu_snapshot.c | 7 +- 8 files changed, 191 insertions(+), 62 deletions(-)
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com> With regards, Daniel -- |: https://berrange.com -o- https://www.flickr.com/photos/dberrange :| |: https://libvirt.org -o- https://fstop138.berrange.com :| |: https://entangle-photo.org -o- https://www.instagram.com/dberrange :|

qemuProcessStartWithMemoryState() is the only caller of qemuProcessStart() that uses the qemuProcessIncomingDef struct. Move creation of the struct to qemuProcessStartWithMemoryState(). Signed-off-by: Jim Fehlig <jfehlig@suse.com> --- src/qemu/qemu_process.c | 42 ++++++++++++++++++++--------------------- src/qemu/qemu_process.h | 2 +- 2 files changed, 22 insertions(+), 22 deletions(-) diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c index 7b7b5c17e3..cf99e4461d 100644 --- a/src/qemu/qemu_process.c +++ b/src/qemu/qemu_process.c @@ -8347,7 +8347,7 @@ qemuProcessStart(virConnectPtr conn, virDomainObj *vm, virCPUDef *updatedCPU, virDomainAsyncJob asyncJob, - const char *migrateFrom, + qemuProcessIncomingDef *incoming, int migrateFd, const char *migratePath, virDomainMomentObj *snapshot, @@ -8355,7 +8355,6 @@ qemuProcessStart(virConnectPtr conn, unsigned int flags) { qemuDomainObjPrivate *priv = vm->privateData; - qemuProcessIncomingDef *incoming = NULL; unsigned int stopFlags; bool relabel = false; bool relabelSavedState = false; @@ -8363,11 +8362,11 @@ qemuProcessStart(virConnectPtr conn, int rv; VIR_DEBUG("conn=%p driver=%p vm=%p name=%s id=%d asyncJob=%s " - "migrateFrom=%s migrateFd=%d migratePath=%s " + "incoming=%p migrateFd=%d migratePath=%s " "snapshot=%p vmop=%d flags=0x%x", conn, driver, vm, vm->def->name, vm->def->id, virDomainAsyncJobTypeToString(asyncJob), - NULLSTR(migrateFrom), migrateFd, NULLSTR(migratePath), + incoming, migrateFd, NULLSTR(migratePath), snapshot, vmop, flags); virCheckFlagsGoto(VIR_QEMU_PROCESS_START_COLD | @@ -8376,20 +8375,13 @@ qemuProcessStart(virConnectPtr conn, VIR_QEMU_PROCESS_START_GEN_VMID | VIR_QEMU_PROCESS_START_RESET_NVRAM, cleanup); - if (!migrateFrom && !snapshot) + if (!incoming && !snapshot) flags |= VIR_QEMU_PROCESS_START_NEW; if (qemuProcessInit(driver, vm, updatedCPU, - asyncJob, !!migrateFrom, flags) < 0) + asyncJob, !!incoming, flags) < 0) goto cleanup; - if (migrateFrom) { - incoming = 
qemuProcessIncomingDefNew(priv->qemuCaps, NULL, migrateFrom, - migrateFd, migratePath); - if (!incoming) - goto stop; - } - if (qemuProcessPrepareDomain(driver, vm, flags) < 0) goto stop; @@ -8442,14 +8434,13 @@ qemuProcessStart(virConnectPtr conn, qemuSecurityRestoreSavedStateLabel(driver->securityManager, vm->def, migratePath) < 0) VIR_WARN("failed to restore save state label on %s", migratePath); - qemuProcessIncomingDefFree(incoming); return ret; stop: stopFlags = 0; if (!relabel) stopFlags |= VIR_QEMU_PROCESS_STOP_NO_RELABEL; - if (migrateFrom) + if (incoming) stopFlags |= VIR_QEMU_PROCESS_STOP_MIGRATED; if (priv->mon) qemuMonitorSetDomainLog(priv->mon, NULL, NULL, NULL); @@ -8503,8 +8494,9 @@ qemuProcessStartWithMemoryState(virConnectPtr conn, VIR_AUTOCLOSE intermediatefd = -1; g_autoptr(virCommand) cmd = NULL; g_autofree char *errbuf = NULL; - const char *migrateFrom = NULL; + qemuProcessIncomingDef *incoming = NULL; int rc = 0; + int ret = -1; if (data) { if (virSaveCookieParseString(data->cookie, (virObject **)&cookie, @@ -8515,10 +8507,15 @@ qemuProcessStartWithMemoryState(virConnectPtr conn, &errbuf, &cmd) < 0) { return -1; } - - migrateFrom = "stdio"; } + /* The fd passed to qemuProcessIncomingDefNew is used to create the migration + * URI, so it must be called after starting the decompression program. + */ + incoming = qemuProcessIncomingDefNew(priv->qemuCaps, NULL, "stdio", *fd, path); + if (!incoming) + return -1; + /* No cookie means libvirt which saved the domain was too old to mess up * the CPU definitions. */ @@ -8529,7 +8526,7 @@ qemuProcessStartWithMemoryState(virConnectPtr conn, priv->disableSlirp = true; if (qemuProcessStart(conn, driver, vm, cookie ? 
cookie->cpu : NULL, - asyncJob, migrateFrom, *fd, path, snapshot, + asyncJob, incoming, *fd, path, snapshot, VIR_NETDEV_VPORT_PROFILE_OP_RESTORE, start_flags) == 0) *started = true; @@ -8541,14 +8538,17 @@ qemuProcessStartWithMemoryState(virConnectPtr conn, virDomainAuditStart(vm, reason, *started); if (!*started || rc < 0) - return -1; + goto cleanup; /* qemuProcessStart doesn't unset the qemu error reporting infrastructure * in case of migration (which is used in this case) so we need to reset it * so that the handle to virtlogd is not held open unnecessarily */ qemuMonitorSetDomainLog(qemuDomainGetMonitor(vm), NULL, NULL, NULL); + ret = 0; - return 0; + cleanup: + qemuProcessIncomingDefFree(incoming); + return ret; } diff --git a/src/qemu/qemu_process.h b/src/qemu/qemu_process.h index fee00ce53b..a9e0a03a21 100644 --- a/src/qemu/qemu_process.h +++ b/src/qemu/qemu_process.h @@ -83,7 +83,7 @@ int qemuProcessStart(virConnectPtr conn, virDomainObj *vm, virCPUDef *updatedCPU, virDomainAsyncJob asyncJob, - const char *migrateFrom, + qemuProcessIncomingDef *incoming, int stdin_fd, const char *stdin_path, virDomainMomentObj *snapshot, -- 2.43.0

On Wed, Mar 05, 2025 at 03:48:19PM -0700, Jim Fehlig via Devel wrote:
qemuProcessStartWithMemoryState() is the only caller of qemuProcessStart() that uses the qemuProcessIncomingDef struct. Move creation of the struct to qemuProcessStartWithMemoryState().
Signed-off-by: Jim Fehlig <jfehlig@suse.com> --- src/qemu/qemu_process.c | 42 ++++++++++++++++++++--------------------- src/qemu/qemu_process.h | 2 +- 2 files changed, 22 insertions(+), 22 deletions(-)
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com> With regards, Daniel -- |: https://berrange.com -o- https://www.flickr.com/photos/dberrange :| |: https://libvirt.org -o- https://fstop138.berrange.com :| |: https://entangle-photo.org -o- https://www.instagram.com/dberrange :|

Similar to qemuMigrationSrcRun, apply migration parameters in qemuMigrationDstRun. This allows callers to create customized migration parameters, but delegates their application to the function performing the migration. Signed-off-by: Jim Fehlig <jfehlig@suse.com> --- src/qemu/qemu_migration.c | 16 ++++++++++------ src/qemu/qemu_migration.h | 5 ++++- src/qemu/qemu_process.c | 2 +- 3 files changed, 15 insertions(+), 8 deletions(-) diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c index f692986056..329b5f61d4 100644 --- a/src/qemu/qemu_migration.c +++ b/src/qemu/qemu_migration.c @@ -2427,7 +2427,10 @@ qemuMigrationDstGetURI(const char *migrateFrom, int qemuMigrationDstRun(virDomainObj *vm, const char *uri, - virDomainAsyncJob asyncJob) + virDomainAsyncJob asyncJob, + qemuMigrationParams *migParams, + unsigned int flags) + { virTristateBool exitOnError = VIR_TRISTATE_BOOL_ABSENT; qemuDomainObjPrivate *priv = vm->privateData; @@ -2435,6 +2438,10 @@ qemuMigrationDstRun(virDomainObj *vm, VIR_DEBUG("Setting up incoming migration with URI %s", uri); + if (migParams && qemuMigrationParamsApply(vm, asyncJob, + migParams, flags) < 0) + return -1; + /* Ask QEMU not to exit on failure during incoming migration (if supported) * so that we can properly check and report error during Finish phase. 
*/ @@ -3368,10 +3375,6 @@ qemuMigrationDstPrepareActive(virQEMUDriver *driver, goto error; } - if (qemuMigrationParamsApply(vm, VIR_ASYNC_JOB_MIGRATION_IN, - migParams, flags) < 0) - goto error; - if (mig->nbd && flags & (VIR_MIGRATE_NON_SHARED_DISK | VIR_MIGRATE_NON_SHARED_INC)) { const char *nbdTLSAlias = NULL; @@ -3403,7 +3406,8 @@ qemuMigrationDstPrepareActive(virQEMUDriver *driver, } if (qemuMigrationDstRun(vm, incoming->uri, - VIR_ASYNC_JOB_MIGRATION_IN) < 0) + VIR_ASYNC_JOB_MIGRATION_IN, + migParams, flags) < 0) goto error; if (qemuProcessFinishStartup(driver, vm, VIR_ASYNC_JOB_MIGRATION_IN, diff --git a/src/qemu/qemu_migration.h b/src/qemu/qemu_migration.h index 9fa007b949..71a9974753 100644 --- a/src/qemu/qemu_migration.h +++ b/src/qemu/qemu_migration.h @@ -283,7 +283,10 @@ qemuMigrationDstGetURI(const char *migrateFrom, int qemuMigrationDstRun(virDomainObj *vm, const char *uri, - virDomainAsyncJob asyncJob); + virDomainAsyncJob asyncJob, + qemuMigrationParams *migParams, + unsigned int flags); + void qemuMigrationSrcPostcopyFailed(virDomainObj *vm); diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c index cf99e4461d..23b2d4a7b6 100644 --- a/src/qemu/qemu_process.c +++ b/src/qemu/qemu_process.c @@ -8404,7 +8404,7 @@ qemuProcessStart(virConnectPtr conn, relabel = true; if (incoming) { - if (qemuMigrationDstRun(vm, incoming->uri, asyncJob) < 0) + if (qemuMigrationDstRun(vm, incoming->uri, asyncJob, NULL, 0) < 0) goto stop; } else { /* Refresh state of devices from QEMU. During migration this happens -- 2.43.0

On Wed, Mar 05, 2025 at 03:48:20PM -0700, Jim Fehlig via Devel wrote:
Similar to qemuMigrationSrcRun, apply migration parameters in qemuMigrationDstRun. This allows callers to create customized migration parameters, but delegates their application to the function performing the migration.
Signed-off-by: Jim Fehlig <jfehlig@suse.com> --- src/qemu/qemu_migration.c | 16 ++++++++++------ src/qemu/qemu_migration.h | 5 ++++- src/qemu/qemu_process.c | 2 +- 3 files changed, 15 insertions(+), 8 deletions(-)
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c index f692986056..329b5f61d4 100644 --- a/src/qemu/qemu_migration.c +++ b/src/qemu/qemu_migration.c @@ -2427,7 +2427,10 @@ qemuMigrationDstGetURI(const char *migrateFrom, int qemuMigrationDstRun(virDomainObj *vm, const char *uri, - virDomainAsyncJob asyncJob) + virDomainAsyncJob asyncJob, + qemuMigrationParams *migParams, + unsigned int flags) + { virTristateBool exitOnError = VIR_TRISTATE_BOOL_ABSENT; qemuDomainObjPrivate *priv = vm->privateData; @@ -2435,6 +2438,10 @@ qemuMigrationDstRun(virDomainObj *vm,
VIR_DEBUG("Setting up incoming migration with URI %s", uri);
+ if (migParams && qemuMigrationParamsApply(vm, asyncJob, + migParams, flags) < 0) + return -1; + /* Ask QEMU not to exit on failure during incoming migration (if supported) * so that we can properly check and report error during Finish phase. */ @@ -3368,10 +3375,6 @@ qemuMigrationDstPrepareActive(virQEMUDriver *driver, goto error; }
- if (qemuMigrationParamsApply(vm, VIR_ASYNC_JOB_MIGRATION_IN, - migParams, flags) < 0) - goto error; -
So semantically, we now apply the dst migration parameters /after/ starting the NBD server, instead of before. This seems like it ought to be safe, as migration is independent of the NBD block layer.
if (mig->nbd && flags & (VIR_MIGRATE_NON_SHARED_DISK | VIR_MIGRATE_NON_SHARED_INC)) { const char *nbdTLSAlias = NULL; @@ -3403,7 +3406,8 @@ qemuMigrationDstPrepareActive(virQEMUDriver *driver, }
if (qemuMigrationDstRun(vm, incoming->uri, - VIR_ASYNC_JOB_MIGRATION_IN) < 0) + VIR_ASYNC_JOB_MIGRATION_IN, + migParams, flags) < 0) goto error;
if (qemuProcessFinishStartup(driver, vm, VIR_ASYNC_JOB_MIGRATION_IN,
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com> With regards, Daniel -- |: https://berrange.com -o- https://www.flickr.com/photos/dberrange :| |: https://libvirt.org -o- https://fstop138.berrange.com :| |: https://entangle-photo.org -o- https://www.instagram.com/dberrange :|

On 3/19/25 09:45, Daniel P. Berrangé wrote:
On Wed, Mar 05, 2025 at 03:48:20PM -0700, Jim Fehlig via Devel wrote:
Similar to qemuMigrationSrcRun, apply migration parameters in qemuMigrationDstRun. This allows callers to create customized migration parameters, but delegates their application to the function performing the migration.
Signed-off-by: Jim Fehlig <jfehlig@suse.com> --- src/qemu/qemu_migration.c | 16 ++++++++++------ src/qemu/qemu_migration.h | 5 ++++- src/qemu/qemu_process.c | 2 +- 3 files changed, 15 insertions(+), 8 deletions(-)
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c index f692986056..329b5f61d4 100644 --- a/src/qemu/qemu_migration.c +++ b/src/qemu/qemu_migration.c @@ -2427,7 +2427,10 @@ qemuMigrationDstGetURI(const char *migrateFrom, int qemuMigrationDstRun(virDomainObj *vm, const char *uri, - virDomainAsyncJob asyncJob) + virDomainAsyncJob asyncJob, + qemuMigrationParams *migParams, + unsigned int flags) + { virTristateBool exitOnError = VIR_TRISTATE_BOOL_ABSENT; qemuDomainObjPrivate *priv = vm->privateData; @@ -2435,6 +2438,10 @@ qemuMigrationDstRun(virDomainObj *vm,
VIR_DEBUG("Setting up incoming migration with URI %s", uri);
+ if (migParams && qemuMigrationParamsApply(vm, asyncJob, + migParams, flags) < 0) + return -1; + /* Ask QEMU not to exit on failure during incoming migration (if supported) * so that we can properly check and report error during Finish phase. */ @@ -3368,10 +3375,6 @@ qemuMigrationDstPrepareActive(virQEMUDriver *driver, goto error; }
- if (qemuMigrationParamsApply(vm, VIR_ASYNC_JOB_MIGRATION_IN, - migParams, flags) < 0) - goto error; -
So semantically, we now apply the dst migration parameters /after/ starting the NBD server, instead of before. This seems like it ought to be safe, as migration is independent of the NBD block layer.
Good point. I took another look and agree it should be fine. The interesting scenario is failure to apply the migration params. Prior to this patch the NBD server would not have been started. After the patch it would be running. Shouldn't be a problem though, since the code already handles other misc failures after starting the NBD server. Regards, Jim

Add support for the mapped-ram migration capability on restore. Signed-off-by: Jim Fehlig <jfehlig@suse.com> --- src/qemu/qemu_driver.c | 27 +++++++++++++++++++------- src/qemu/qemu_migration.c | 12 ++++++------ src/qemu/qemu_process.c | 41 ++++++++++++++++++++++++++++----------- src/qemu/qemu_process.h | 15 +++++++++----- src/qemu/qemu_saveimage.c | 29 ++++++++++++++++----------- src/qemu/qemu_saveimage.h | 2 ++ src/qemu/qemu_snapshot.c | 8 ++++---- 7 files changed, 90 insertions(+), 44 deletions(-) diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c index 2b2dda9d26..11a67fab57 100644 --- a/src/qemu/qemu_driver.c +++ b/src/qemu/qemu_driver.c @@ -1593,7 +1593,7 @@ static virDomainPtr qemuDomainCreateXML(virConnectPtr conn, } if (qemuProcessStart(conn, driver, vm, NULL, VIR_ASYNC_JOB_START, - NULL, -1, NULL, NULL, + NULL, -1, NULL, NULL, NULL, VIR_NETDEV_VPORT_PROFILE_OP_CREATE, start_flags) < 0) { virDomainAuditStart(vm, "booted", false); @@ -5758,6 +5758,8 @@ qemuDomainRestoreInternal(virConnectPtr conn, virFileWrapperFd *wrapperFd = NULL; bool hook_taint = false; bool reset_nvram = false; + bool sparse = false; + g_autoptr(qemuMigrationParams) restoreParams = NULL; virCheckFlags(VIR_DOMAIN_SAVE_BYPASS_CACHE | VIR_DOMAIN_SAVE_RUNNING | @@ -5770,9 +5772,13 @@ qemuDomainRestoreInternal(virConnectPtr conn, if (qemuSaveImageGetMetadata(driver, NULL, path, &def, &data) < 0) goto cleanup; + sparse = data->header.format == QEMU_SAVE_FORMAT_SPARSE; + if (!(restoreParams = qemuMigrationParamsForSave(sparse))) + goto cleanup; + fd = qemuSaveImageOpen(driver, path, (flags & VIR_DOMAIN_SAVE_BYPASS_CACHE) != 0, - &wrapperFd, false); + sparse, &wrapperFd, false); if (fd < 0) goto cleanup; @@ -5826,7 +5832,7 @@ qemuDomainRestoreInternal(virConnectPtr conn, if (qemuProcessBeginJob(vm, VIR_DOMAIN_JOB_OPERATION_RESTORE, flags) < 0) goto cleanup; - ret = qemuSaveImageStartVM(conn, driver, vm, &fd, data, path, + ret = qemuSaveImageStartVM(conn, driver, vm, &fd, data, path, 
restoreParams, false, reset_nvram, VIR_ASYNC_JOB_START); qemuProcessEndJob(vm); @@ -5941,7 +5947,8 @@ qemuDomainSaveImageDefineXML(virConnectPtr conn, const char *path, if (qemuSaveImageGetMetadata(driver, NULL, path, &def, &data) < 0) goto cleanup; - fd = qemuSaveImageOpen(driver, path, false, NULL, true); + fd = qemuSaveImageOpen(driver, path, false, false, NULL, false); + if (fd < 0) goto cleanup; @@ -6080,6 +6087,8 @@ qemuDomainObjRestore(virConnectPtr conn, g_autofree char *xmlout = NULL; virQEMUSaveData *data = NULL; virFileWrapperFd *wrapperFd = NULL; + bool sparse = false; + g_autoptr(qemuMigrationParams) restoreParams = NULL; ret = qemuSaveImageGetMetadata(driver, NULL, path, &def, &data); if (ret < 0) { @@ -6097,7 +6106,11 @@ qemuDomainObjRestore(virConnectPtr conn, goto cleanup; } - fd = qemuSaveImageOpen(driver, path, bypass_cache, &wrapperFd, false); + sparse = data->header.format == QEMU_SAVE_FORMAT_SPARSE; + if (!(restoreParams = qemuMigrationParamsForSave(sparse))) + return -1; + + fd = qemuSaveImageOpen(driver, path, bypass_cache, sparse, &wrapperFd, false); if (fd < 0) goto cleanup; @@ -6139,7 +6152,7 @@ qemuDomainObjRestore(virConnectPtr conn, virDomainObjAssignDef(vm, &def, true, NULL); - ret = qemuSaveImageStartVM(conn, driver, vm, &fd, data, path, + ret = qemuSaveImageStartVM(conn, driver, vm, &fd, data, path, restoreParams, start_paused, reset_nvram, asyncJob); cleanup: @@ -6345,7 +6358,7 @@ qemuDomainObjStart(virConnectPtr conn, } ret = qemuProcessStart(conn, driver, vm, NULL, asyncJob, - NULL, -1, NULL, NULL, + NULL, -1, NULL, NULL, NULL, VIR_NETDEV_VPORT_PROFILE_OP_CREATE, start_flags); virDomainAuditStart(vm, "booted", ret >= 0); if (ret >= 0) { diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c index 329b5f61d4..1a85ec1c51 100644 --- a/src/qemu/qemu_migration.c +++ b/src/qemu/qemu_migration.c @@ -3115,9 +3115,8 @@ qemuMigrationDstPrepare(virDomainObj *vm, const char *protocol, const char *listenAddress, unsigned short 
port, - int fd) + int *fd) { - qemuDomainObjPrivate *priv = vm->privateData; g_autofree char *migrateFrom = NULL; if (tunnel) { @@ -3171,8 +3170,9 @@ qemuMigrationDstPrepare(virDomainObj *vm, migrateFrom = g_strdup_printf(incFormat, protocol, listenAddress, port); } - return qemuProcessIncomingDefNew(priv->qemuCaps, listenAddress, - migrateFrom, fd, NULL); + return qemuProcessIncomingDefNew(vm, listenAddress, + migrateFrom, fd, + NULL, NULL); } @@ -3314,7 +3314,7 @@ qemuMigrationDstPrepareActive(virQEMUDriver *driver, if (!(incoming = qemuMigrationDstPrepare(vm, tunnel, protocol, listenAddress, port, - dataFD[0]))) + &dataFD[0]))) goto error; qemuMigrationDstPrepareDiskSeclabels(vm, migrate_disks, flags); @@ -3685,7 +3685,7 @@ qemuMigrationDstPrepareResume(virQEMUDriver *driver, priv->origname = g_strdup(origname); if (!(incoming = qemuMigrationDstPrepare(vm, false, protocol, - listenAddress, port, -1))) + listenAddress, port, NULL))) goto cleanup; if (qemuDomainObjEnterMonitorAsync(vm, VIR_ASYNC_JOB_MIGRATION_IN) < 0) diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c index 23b2d4a7b6..cba44ec5c2 100644 --- a/src/qemu/qemu_process.c +++ b/src/qemu/qemu_process.c @@ -4827,6 +4827,7 @@ qemuProcessIncomingDefFree(qemuProcessIncomingDef *inc) g_free(inc->address); g_free(inc->uri); + qemuFDPassFree(inc->fdPassMigrate); g_free(inc); } @@ -4840,26 +4841,38 @@ qemuProcessIncomingDefFree(qemuProcessIncomingDef *inc) * qemuProcessIncomingDefFree will NOT close it. 
*/ qemuProcessIncomingDef * -qemuProcessIncomingDefNew(virQEMUCaps *qemuCaps, +qemuProcessIncomingDefNew(virDomainObj *vm, const char *listenAddress, const char *migrateFrom, - int fd, - const char *path) + int *fd, + const char *path, + virQEMUSaveData *data) { + qemuDomainObjPrivate *priv = vm->privateData; qemuProcessIncomingDef *inc = NULL; - if (qemuMigrationDstCheckProtocol(qemuCaps, migrateFrom) < 0) + if (qemuMigrationDstCheckProtocol(priv->qemuCaps, migrateFrom) < 0) return NULL; inc = g_new0(qemuProcessIncomingDef, 1); inc->address = g_strdup(listenAddress); - inc->uri = qemuMigrationDstGetURI(migrateFrom, fd); + if (data && data->header.format == QEMU_SAVE_FORMAT_SPARSE) { + size_t offset = sizeof(virQEMUSaveHeader) + data->header.data_len; + + inc->fdPassMigrate = qemuFDPassNew("libvirt-incoming-migrate", priv); + qemuFDPassAddFD(inc->fdPassMigrate, fd, "-fd"); + inc->uri = g_strdup_printf("file:%s,offset=%#lx", + qemuFDPassGetPath(inc->fdPassMigrate), offset); + } else { + inc->uri = qemuMigrationDstGetURI(migrateFrom, *fd); + } + if (!inc->uri) goto error; - inc->fd = fd; + inc->fd = *fd; inc->path = path; return inc; @@ -7931,8 +7944,11 @@ qemuProcessLaunch(virConnectPtr conn, &nnicindexes, &nicindexes))) goto cleanup; - if (incoming && incoming->fd != -1) - virCommandPassFD(cmd, incoming->fd, 0); + if (incoming) { + if (incoming->fd != -1) + virCommandPassFD(cmd, incoming->fd, 0); + qemuFDPassTransferCommand(incoming->fdPassMigrate, cmd); + } /* now that we know it is about to start call the hook if present */ if (qemuProcessStartHook(driver, vm, @@ -8351,6 +8367,7 @@ qemuProcessStart(virConnectPtr conn, int migrateFd, const char *migratePath, virDomainMomentObj *snapshot, + qemuMigrationParams *migParams, virNetDevVPortProfileOp vmop, unsigned int flags) { @@ -8404,7 +8421,7 @@ qemuProcessStart(virConnectPtr conn, relabel = true; if (incoming) { - if (qemuMigrationDstRun(vm, incoming->uri, asyncJob, NULL, 0) < 0) + if (qemuMigrationDstRun(vm, 
incoming->uri, asyncJob, migParams, 0) < 0) goto stop; } else { /* Refresh state of devices from QEMU. During migration this happens @@ -8458,6 +8475,7 @@ qemuProcessStart(virConnectPtr conn, * @path: path to memory state file * @snapshot: internal snapshot to load when starting QEMU process or NULL * @data: data from memory state file or NULL + * @migParams: Migration params to use on restore or NULL * @asyncJob: type of asynchronous job * @start_flags: flags to start QEMU process with * @reason: audit log reason @@ -8484,6 +8502,7 @@ qemuProcessStartWithMemoryState(virConnectPtr conn, const char *path, virDomainMomentObj *snapshot, virQEMUSaveData *data, + qemuMigrationParams *migParams, virDomainAsyncJob asyncJob, unsigned int start_flags, const char *reason, @@ -8512,7 +8531,7 @@ qemuProcessStartWithMemoryState(virConnectPtr conn, /* The fd passed to qemuProcessIncomingDefNew is used to create the migration * URI, so it must be called after starting the decompression program. */ - incoming = qemuProcessIncomingDefNew(priv->qemuCaps, NULL, "stdio", *fd, path); + incoming = qemuProcessIncomingDefNew(vm, NULL, "stdio", fd, path, data); if (!incoming) return -1; @@ -8527,7 +8546,7 @@ qemuProcessStartWithMemoryState(virConnectPtr conn, if (qemuProcessStart(conn, driver, vm, cookie ? 
cookie->cpu : NULL, asyncJob, incoming, *fd, path, snapshot, - VIR_NETDEV_VPORT_PROFILE_OP_RESTORE, + migParams, VIR_NETDEV_VPORT_PROFILE_OP_RESTORE, start_flags) == 0) *started = true; diff --git a/src/qemu/qemu_process.h b/src/qemu/qemu_process.h index a9e0a03a21..c51335ad7a 100644 --- a/src/qemu/qemu_process.h +++ b/src/qemu/qemu_process.h @@ -53,14 +53,17 @@ struct _qemuProcessIncomingDef { char *address; /* address where QEMU is supposed to listen */ char *uri; /* used when calling migrate-incoming QMP command */ int fd; /* for fd:N URI */ + qemuFDPass *fdPassMigrate; /* for file:/dev/fdset/n,offset=x URI */ const char *path; /* path associated with fd */ }; -qemuProcessIncomingDef *qemuProcessIncomingDefNew(virQEMUCaps *qemuCaps, - const char *listenAddress, - const char *migrateFrom, - int fd, - const char *path); +qemuProcessIncomingDef *qemuProcessIncomingDefNew(virDomainObj *vm, + const char *listenAddress, + const char *migrateFrom, + int *fd, + const char *path, + virQEMUSaveData *data); + void qemuProcessIncomingDefFree(qemuProcessIncomingDef *inc); int qemuProcessBeginJob(virDomainObj *vm, @@ -87,6 +90,7 @@ int qemuProcessStart(virConnectPtr conn, int stdin_fd, const char *stdin_path, virDomainMomentObj *snapshot, + qemuMigrationParams *migParams, virNetDevVPortProfileOp vmop, unsigned int flags); @@ -97,6 +101,7 @@ int qemuProcessStartWithMemoryState(virConnectPtr conn, const char *path, virDomainMomentObj *snapshot, virQEMUSaveData *data, + qemuMigrationParams *migParams, virDomainAsyncJob asyncJob, unsigned int start_flags, const char *reason, diff --git a/src/qemu/qemu_saveimage.c b/src/qemu/qemu_saveimage.c index cad982f450..f00078bfcf 100644 --- a/src/qemu/qemu_saveimage.c +++ b/src/qemu/qemu_saveimage.c @@ -348,7 +348,8 @@ qemuSaveImageDecompressionStart(virQEMUSaveData *data, if (header->version != 2) return 0; - if (header->format == QEMU_SAVE_FORMAT_RAW) + if (header->format == QEMU_SAVE_FORMAT_RAW || + header->format == 
QEMU_SAVE_FORMAT_SPARSE) return 0; if (!(cmd = qemuSaveImageGetCompressionCommand(header->format))) @@ -656,6 +657,7 @@ qemuSaveImageGetMetadata(virQEMUDriver *driver, * @driver: qemu driver data * @path: path of the save image * @bypass_cache: bypass cache when opening the file + * @sparse: Image contains mapped-ram save format * @wrapperFd: returns the file wrapper structure * @open_write: open the file for writing (for updates) * @@ -665,6 +667,7 @@ int qemuSaveImageOpen(virQEMUDriver *driver, const char *path, bool bypass_cache, + bool sparse, virFileWrapperFd **wrapperFd, bool open_write) { @@ -686,15 +689,18 @@ qemuSaveImageOpen(virQEMUDriver *driver, if ((fd = qemuDomainOpenFile(cfg, NULL, path, oflags, NULL)) < 0) return -1; - if (bypass_cache && - !(*wrapperFd = virFileWrapperFdNew(&fd, path, - VIR_FILE_WRAPPER_BYPASS_CACHE))) - return -1; + /* If sparse, no need for the iohelper or positioning the file pointer. */ + if (!sparse) { + if (bypass_cache && + !(*wrapperFd = virFileWrapperFdNew(&fd, path, + VIR_FILE_WRAPPER_BYPASS_CACHE))) + return -1; - /* Read the header to position the file pointer for QEMU. Unfortunately we - * can't use lseek with virFileWrapperFD. */ - if (qemuSaveImageReadHeader(fd, NULL) < 0) - return -1; + /* Read the header to position the file pointer for QEMU. Unfortunately we + * can't use lseek with virFileWrapperFD. 
*/ + if (qemuSaveImageReadHeader(fd, NULL) < 0) + return -1; + } ret = fd; fd = -1; @@ -710,6 +716,7 @@ qemuSaveImageStartVM(virConnectPtr conn, int *fd, virQEMUSaveData *data, const char *path, + qemuMigrationParams *restoreParams, bool start_paused, bool reset_nvram, virDomainAsyncJob asyncJob) @@ -726,8 +733,8 @@ qemuSaveImageStartVM(virConnectPtr conn, start_flags |= VIR_QEMU_PROCESS_START_RESET_NVRAM; if (qemuProcessStartWithMemoryState(conn, driver, vm, fd, path, NULL, data, - asyncJob, start_flags, "restored", - &started) < 0) { + restoreParams, asyncJob, start_flags, + "restored", &started) < 0) { goto cleanup; } diff --git a/src/qemu/qemu_saveimage.h b/src/qemu/qemu_saveimage.h index 495d773ea5..89c6941385 100644 --- a/src/qemu/qemu_saveimage.h +++ b/src/qemu/qemu_saveimage.h @@ -83,6 +83,7 @@ qemuSaveImageStartVM(virConnectPtr conn, int *fd, virQEMUSaveData *data, const char *path, + qemuMigrationParams *restoreParams, bool start_paused, bool reset_nvram, virDomainAsyncJob asyncJob) @@ -105,6 +106,7 @@ int qemuSaveImageOpen(virQEMUDriver *driver, const char *path, bool bypass_cache, + bool sparse, virFileWrapperFd **wrapperFd, bool open_write) ATTRIBUTE_NONNULL(2) ATTRIBUTE_NONNULL(4); diff --git a/src/qemu/qemu_snapshot.c b/src/qemu/qemu_snapshot.c index fa814e051c..6b6a387d22 100644 --- a/src/qemu/qemu_snapshot.c +++ b/src/qemu/qemu_snapshot.c @@ -2413,7 +2413,7 @@ qemuSnapshotRevertExternalPrepare(virDomainObj *vm, return -1; memdata->fd = qemuSaveImageOpen(driver, memdata->path, - false, NULL, false); + false, false, NULL, false); if (memdata->fd < 0) return -1; @@ -2653,7 +2653,7 @@ qemuSnapshotRevertActive(virDomainObj *vm, if (qemuProcessStartWithMemoryState(snapshot->domain->conn, driver, vm, &memdata.fd, memdata.path, loadSnap, - memdata.data, VIR_ASYNC_JOB_SNAPSHOT, + memdata.data, NULL, VIR_ASYNC_JOB_SNAPSHOT, start_flags, "from-snapshot", &started) < 0) { if (started) { @@ -2807,7 +2807,7 @@ qemuSnapshotRevertInactive(virDomainObj *vm, rc = 
qemuProcessStart(snapshot->domain->conn, driver, vm, NULL, VIR_ASYNC_JOB_SNAPSHOT, NULL, -1, NULL, NULL, - VIR_NETDEV_VPORT_PROFILE_OP_CREATE, + NULL, VIR_NETDEV_VPORT_PROFILE_OP_CREATE, start_flags); virDomainAuditStart(vm, "from-snapshot", rc >= 0); if (rc < 0) { @@ -3283,7 +3283,7 @@ qemuSnapshotDeleteExternalPrepare(virDomainObj *vm, if (!virDomainObjIsActive(vm)) { if (qemuProcessStart(NULL, driver, vm, NULL, VIR_ASYNC_JOB_SNAPSHOT, - NULL, -1, NULL, NULL, + NULL, -1, NULL, NULL, NULL, VIR_NETDEV_VPORT_PROFILE_OP_CREATE, VIR_QEMU_PROCESS_START_PAUSED) < 0) { return -1; -- 2.43.0

On Wed, Mar 05, 2025 at 03:48:21PM -0700, Jim Fehlig via Devel wrote:
Add support for the mapped-ram migration capability on restore.
Signed-off-by: Jim Fehlig <jfehlig@suse.com> --- src/qemu/qemu_driver.c | 27 +++++++++++++++++++------- src/qemu/qemu_migration.c | 12 ++++++------ src/qemu/qemu_process.c | 41 ++++++++++++++++++++++++++++----------- src/qemu/qemu_process.h | 15 +++++++++----- src/qemu/qemu_saveimage.c | 29 ++++++++++++++++----------- src/qemu/qemu_saveimage.h | 2 ++ src/qemu/qemu_snapshot.c | 8 ++++---- 7 files changed, 90 insertions(+), 44 deletions(-)
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com> With regards, Daniel -- |: https://berrange.com -o- https://www.flickr.com/photos/dberrange :| |: https://libvirt.org -o- https://fstop138.berrange.com :| |: https://entangle-photo.org -o- https://www.instagram.com/dberrange :|

When using the mapped-ram migration capability, direct IO is enabled by setting the "direct-io" migration parameter to "true" and passing QEMU an additional fd with O_DIRECT set. Signed-off-by: Jim Fehlig <jfehlig@suse.com> --- src/qemu/qemu_driver.c | 10 ++++++---- src/qemu/qemu_migration.c | 32 ++++++++++++++++++++++++++------ src/qemu/qemu_migration.h | 1 + src/qemu/qemu_migration_params.c | 11 ++++++++++- src/qemu/qemu_migration_params.h | 3 ++- src/qemu/qemu_monitor.c | 7 +++++-- src/qemu/qemu_monitor.h | 3 ++- src/qemu/qemu_saveimage.c | 2 +- 8 files changed, 53 insertions(+), 16 deletions(-) diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c index 11a67fab57..e098a96a4e 100644 --- a/src/qemu/qemu_driver.c +++ b/src/qemu/qemu_driver.c @@ -2679,7 +2679,8 @@ qemuDomainSaveInternal(virQEMUDriver *driver, goto endjob; xml = NULL; - if (!(saveParams = qemuMigrationParamsForSave(format == QEMU_SAVE_FORMAT_SPARSE))) + if (!(saveParams = qemuMigrationParamsForSave(format == QEMU_SAVE_FORMAT_SPARSE, + flags))) goto endjob; ret = qemuSaveImageCreate(driver, vm, path, data, compressor, @@ -3129,7 +3130,7 @@ doCoreDump(virQEMUDriver *driver, if (!(dump_params = qemuMigrationParamsNew())) goto cleanup; - if (qemuMigrationSrcToFile(driver, vm, &fd, compressor, + if (qemuMigrationSrcToFile(driver, vm, path, &fd, compressor, dump_params, dump_flags, VIR_ASYNC_JOB_DUMP) < 0) goto cleanup; } @@ -5773,7 +5774,7 @@ qemuDomainRestoreInternal(virConnectPtr conn, goto cleanup; sparse = data->header.format == QEMU_SAVE_FORMAT_SPARSE; - if (!(restoreParams = qemuMigrationParamsForSave(sparse))) + if (!(restoreParams = qemuMigrationParamsForSave(sparse, flags))) goto cleanup; fd = qemuSaveImageOpen(driver, path, @@ -6107,7 +6108,8 @@ qemuDomainObjRestore(virConnectPtr conn, } sparse = data->header.format == QEMU_SAVE_FORMAT_SPARSE; - if (!(restoreParams = qemuMigrationParamsForSave(sparse))) + if (!(restoreParams = qemuMigrationParamsForSave(sparse, + bypass_cache ? 
VIR_DOMAIN_SAVE_BYPASS_CACHE : 0))) return -1; fd = qemuSaveImageOpen(driver, path, bypass_cache, sparse, &wrapperFd, false); diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c index 1a85ec1c51..4554a52165 100644 --- a/src/qemu/qemu_migration.c +++ b/src/qemu/qemu_migration.c @@ -7188,17 +7188,36 @@ qemuMigrationSrcToLegacyFile(virQEMUDriver *driver, static int qemuMigrationSrcToSparseFile(virQEMUDriver *driver, virDomainObj *vm, + const char *path, int *fd, unsigned int flags, virDomainAsyncJob asyncJob) { + g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver); + VIR_AUTOCLOSE directFd = -1; + int directFlag = 0; + bool needUnlink = false; int ret; - /* mapped-ram does not support directIO */ + /* When using directio with mapped-ram, qemu needs two fds. One with + * O_DIRECT set writing the memory, and another without it set for + * writing small bits of unaligned state. */ if ((flags & VIR_DOMAIN_SAVE_BYPASS_CACHE)) { - virReportError(VIR_ERR_OPERATION_FAILED, "%s", - _("bypass cache unsupported by this system")); - return -1; + directFlag = virFileDirectFdFlag(); + if (directFlag < 0) { + virReportError(VIR_ERR_OPERATION_FAILED, "%s", + _("bypass cache unsupported by this system")); + return -1; + } + directFd = virQEMUFileOpenAs(cfg->user, cfg->group, false, path, + O_WRONLY | directFlag, &needUnlink); + + if (directFd < 0) + return -1; + + if (qemuSecuritySetImageFDLabel(driver->securityManager, vm->def, directFd) < 0) + return -1; + } if (qemuSecuritySetImageFDLabel(driver->securityManager, vm->def, *fd) < 0) @@ -7207,7 +7226,7 @@ qemuMigrationSrcToSparseFile(virQEMUDriver *driver, if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; - ret = qemuMonitorMigrateToFdSet(vm, 0, fd); + ret = qemuMonitorMigrateToFdSet(vm, 0, fd, &directFd); qemuDomainObjExitMonitor(vm); return ret; } @@ -7216,6 +7235,7 @@ qemuMigrationSrcToSparseFile(virQEMUDriver *driver, /* Helper function called while vm is active. 
*/ int qemuMigrationSrcToFile(virQEMUDriver *driver, virDomainObj *vm, + const char *path, int *fd, virCommand *compressor, qemuMigrationParams *migParams, @@ -7253,7 +7273,7 @@ qemuMigrationSrcToFile(virQEMUDriver *driver, virDomainObj *vm, if (migParams && qemuMigrationParamsCapEnabled(migParams, QEMU_MIGRATION_CAP_MAPPED_RAM)) - rc = qemuMigrationSrcToSparseFile(driver, vm, fd, flags, asyncJob); + rc = qemuMigrationSrcToSparseFile(driver, vm, path, fd, flags, asyncJob); else rc = qemuMigrationSrcToLegacyFile(driver, vm, *fd, compressor, asyncJob); diff --git a/src/qemu/qemu_migration.h b/src/qemu/qemu_migration.h index 71a9974753..beb888160a 100644 --- a/src/qemu/qemu_migration.h +++ b/src/qemu/qemu_migration.h @@ -238,6 +238,7 @@ qemuMigrationSrcIsAllowed(virDomainObj *vm, int qemuMigrationSrcToFile(virQEMUDriver *driver, virDomainObj *vm, + const char *path, int *fd, virCommand *compressor, qemuMigrationParams *migParams, diff --git a/src/qemu/qemu_migration_params.c b/src/qemu/qemu_migration_params.c index d48cdd5506..16e93f27d6 100644 --- a/src/qemu/qemu_migration_params.c +++ b/src/qemu/qemu_migration_params.c @@ -130,6 +130,7 @@ VIR_ENUM_IMPL(qemuMigrationParam, "multifd-zlib-level", "multifd-zstd-level", "avail-switchover-bandwidth", + "direct-io", ); typedef struct _qemuMigrationParamsAlwaysOnItem qemuMigrationParamsAlwaysOnItem; @@ -328,6 +329,9 @@ static const qemuMigrationParamInfoItem qemuMigrationParamInfo[] = { [QEMU_MIGRATION_PARAM_AVAIL_SWITCHOVER_BANDWIDTH] = { .type = QEMU_MIGRATION_PARAM_TYPE_ULL, }, + [QEMU_MIGRATION_PARAM_DIRECT_IO] = { + .type = QEMU_MIGRATION_PARAM_TYPE_BOOL, + }, }; G_STATIC_ASSERT(G_N_ELEMENTS(qemuMigrationParamInfo) == QEMU_MIGRATION_PARAM_LAST); @@ -793,7 +797,7 @@ qemuMigrationParamsFromFlags(virTypedParameterPtr params, qemuMigrationParams * -qemuMigrationParamsForSave(bool sparse) +qemuMigrationParamsForSave(bool sparse, unsigned int flags) { g_autoptr(qemuMigrationParams) saveParams = NULL; @@ -807,6 +811,11 @@ 
qemuMigrationParamsForSave(bool sparse) return NULL; saveParams->params[QEMU_MIGRATION_PARAM_MULTIFD_CHANNELS].value.i = 1; saveParams->params[QEMU_MIGRATION_PARAM_MULTIFD_CHANNELS].set = true; + + if (flags & VIR_DOMAIN_SAVE_BYPASS_CACHE) { + saveParams->params[QEMU_MIGRATION_PARAM_DIRECT_IO].value.b = true; + saveParams->params[QEMU_MIGRATION_PARAM_DIRECT_IO].set = true; + } } return g_steal_pointer(&saveParams); diff --git a/src/qemu/qemu_migration_params.h b/src/qemu/qemu_migration_params.h index 88a1bc1a66..87822332d3 100644 --- a/src/qemu/qemu_migration_params.h +++ b/src/qemu/qemu_migration_params.h @@ -66,6 +66,7 @@ typedef enum { QEMU_MIGRATION_PARAM_MULTIFD_ZLIB_LEVEL, QEMU_MIGRATION_PARAM_MULTIFD_ZSTD_LEVEL, QEMU_MIGRATION_PARAM_AVAIL_SWITCHOVER_BANDWIDTH, + QEMU_MIGRATION_PARAM_DIRECT_IO, QEMU_MIGRATION_PARAM_LAST } qemuMigrationParam; @@ -88,7 +89,7 @@ qemuMigrationParamsFromFlags(virTypedParameterPtr params, qemuMigrationParty party); qemuMigrationParams * -qemuMigrationParamsForSave(bool sparse); +qemuMigrationParamsForSave(bool sparse, unsigned int flags); int qemuMigrationParamsDump(qemuMigrationParams *migParams, diff --git a/src/qemu/qemu_monitor.c b/src/qemu/qemu_monitor.c index 8d244c6f4a..3b26fab90d 100644 --- a/src/qemu/qemu_monitor.c +++ b/src/qemu/qemu_monitor.c @@ -2235,7 +2235,8 @@ qemuMonitorMigrateToFd(qemuMonitor *mon, int qemuMonitorMigrateToFdSet(virDomainObj *vm, unsigned int flags, - int *fd) + int *fd, + int *directFd) { qemuDomainObjPrivate *priv = vm->privateData; qemuMonitor *mon = priv->mon; @@ -2244,7 +2245,7 @@ qemuMonitorMigrateToFdSet(virDomainObj *vm, g_autofree char *uri = NULL; int ret; - VIR_DEBUG("fd=%d flags=0x%x", *fd, flags); + VIR_DEBUG("fd=%d directFd=%d flags=0x%x", *fd, *directFd, flags); QEMU_CHECK_MONITOR(mon); @@ -2256,6 +2257,8 @@ qemuMonitorMigrateToFdSet(virDomainObj *vm, fdPassMigrate = qemuFDPassNew("libvirt-outgoing-migrate", priv); qemuFDPassAddFD(fdPassMigrate, fd, "-fd"); + if (*directFd != -1) + 
qemuFDPassAddFD(fdPassMigrate, directFd, "-directio-fd"); qemuFDPassTransferMonitor(fdPassMigrate, mon); uri = g_strdup_printf("file:%s,offset=%#lx", diff --git a/src/qemu/qemu_monitor.h b/src/qemu/qemu_monitor.h index 4022bc3c7b..d5fa157696 100644 --- a/src/qemu/qemu_monitor.h +++ b/src/qemu/qemu_monitor.h @@ -861,7 +861,8 @@ int qemuMonitorMigrateToFd(qemuMonitor *mon, int qemuMonitorMigrateToFdSet(virDomainObj *vm, unsigned int flags, - int *fd); + int *fd, + int *directFd); int qemuMonitorMigrateToHost(qemuMonitor *mon, unsigned int flags, diff --git a/src/qemu/qemu_saveimage.c b/src/qemu/qemu_saveimage.c index f00078bfcf..4c91c1be67 100644 --- a/src/qemu/qemu_saveimage.c +++ b/src/qemu/qemu_saveimage.c @@ -501,7 +501,7 @@ qemuSaveImageCreate(virQEMUDriver *driver, goto cleanup; /* Perform the migration */ - if (qemuMigrationSrcToFile(driver, vm, &fd, compressor, saveParams, flags, asyncJob) < 0) + if (qemuMigrationSrcToFile(driver, vm, path, &fd, compressor, saveParams, flags, asyncJob) < 0) goto cleanup; /* Touch up file header to mark image complete. */ -- 2.43.0

On Wed, Mar 05, 2025 at 03:48:22PM -0700, Jim Fehlig via Devel wrote:
When using the mapped-ram migration capability, direct IO is enabled by setting the "direct-io" migration parameter to "true" and passing QEMU an additional fd with O_DIRECT set.
Signed-off-by: Jim Fehlig <jfehlig@suse.com> --- src/qemu/qemu_driver.c | 10 ++++++---- src/qemu/qemu_migration.c | 32 ++++++++++++++++++++++++++------ src/qemu/qemu_migration.h | 1 + src/qemu/qemu_migration_params.c | 11 ++++++++++- src/qemu/qemu_migration_params.h | 3 ++- src/qemu/qemu_monitor.c | 7 +++++-- src/qemu/qemu_monitor.h | 3 ++- src/qemu/qemu_saveimage.c | 2 +- 8 files changed, 53 insertions(+), 16 deletions(-)
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com> With regards, Daniel -- |: https://berrange.com -o- https://www.flickr.com/photos/dberrange :| |: https://libvirt.org -o- https://fstop138.berrange.com :| |: https://entangle-photo.org -o- https://www.instagram.com/dberrange :|

When using the mapped-ram migration capability, direct IO is enabled by setting the "direct-io" migration parameter to "true" and passing QEMU an additional fd with O_DIRECT set. Signed-off-by: Jim Fehlig <jfehlig@suse.com> --- src/qemu/qemu_migration.c | 11 ++++++----- src/qemu/qemu_process.c | 27 +++++++++++++++++++++++---- src/qemu/qemu_process.h | 6 ++++-- 3 files changed, 33 insertions(+), 11 deletions(-) diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c index 4554a52165..6c8098c65d 100644 --- a/src/qemu/qemu_migration.c +++ b/src/qemu/qemu_migration.c @@ -3110,7 +3110,8 @@ qemuMigrationDstPrepareCleanup(virQEMUDriver *driver, } static qemuProcessIncomingDef * -qemuMigrationDstPrepare(virDomainObj *vm, +qemuMigrationDstPrepare(virQEMUDriver *driver, + virDomainObj *vm, bool tunnel, const char *protocol, const char *listenAddress, @@ -3170,9 +3171,9 @@ qemuMigrationDstPrepare(virDomainObj *vm, migrateFrom = g_strdup_printf(incFormat, protocol, listenAddress, port); } - return qemuProcessIncomingDefNew(vm, listenAddress, + return qemuProcessIncomingDefNew(driver, vm, listenAddress, migrateFrom, fd, - NULL, NULL); + NULL, NULL, NULL); } @@ -3312,7 +3313,7 @@ qemuMigrationDstPrepareActive(virQEMUDriver *driver, goto error; stopProcess = true; - if (!(incoming = qemuMigrationDstPrepare(vm, tunnel, protocol, + if (!(incoming = qemuMigrationDstPrepare(driver, vm, tunnel, protocol, listenAddress, port, &dataFD[0]))) goto error; @@ -3684,7 +3685,7 @@ qemuMigrationDstPrepareResume(virQEMUDriver *driver, priv->origname = g_strdup(origname); - if (!(incoming = qemuMigrationDstPrepare(vm, false, protocol, + if (!(incoming = qemuMigrationDstPrepare(driver, vm, false, protocol, listenAddress, port, NULL))) goto cleanup; diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c index cba44ec5c2..16bb961485 100644 --- a/src/qemu/qemu_process.c +++ b/src/qemu/qemu_process.c @@ -4841,13 +4841,16 @@ qemuProcessIncomingDefFree(qemuProcessIncomingDef *inc) * 
qemuProcessIncomingDefFree will NOT close it. */ qemuProcessIncomingDef * -qemuProcessIncomingDefNew(virDomainObj *vm, +qemuProcessIncomingDefNew(virQEMUDriver *driver, + virDomainObj *vm, const char *listenAddress, const char *migrateFrom, int *fd, const char *path, - virQEMUSaveData *data) + virQEMUSaveData *data, + qemuMigrationParams *migParams) { + g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver); qemuDomainObjPrivate *priv = vm->privateData; qemuProcessIncomingDef *inc = NULL; @@ -4860,9 +4863,25 @@ qemuProcessIncomingDefNew(virDomainObj *vm, if (data && data->header.format == QEMU_SAVE_FORMAT_SPARSE) { size_t offset = sizeof(virQEMUSaveHeader) + data->header.data_len; + bool directio = false; inc->fdPassMigrate = qemuFDPassNew("libvirt-incoming-migrate", priv); - qemuFDPassAddFD(inc->fdPassMigrate, fd, "-fd"); + /* When using directio with mapped-ram, qemu needs an fd without + * O_DIRECT set for reading small bits of unaligned state. */ + if (qemuMigrationParamsGetBool(migParams, QEMU_MIGRATION_PARAM_DIRECT_IO, &directio) < 0) + goto error; + + if (directio) { + VIR_AUTOCLOSE bufferedFd = -1; + + if ((bufferedFd = qemuDomainOpenFile(cfg, NULL, path, O_RDONLY, NULL)) < 0) + goto error; + + qemuFDPassAddFD(inc->fdPassMigrate, &bufferedFd, "-buffered-fd"); + qemuFDPassAddFD(inc->fdPassMigrate, fd, "direct-io-fd"); + } else { + qemuFDPassAddFD(inc->fdPassMigrate, fd, "-buffered-fd"); + } inc->uri = g_strdup_printf("file:%s,offset=%#lx", qemuFDPassGetPath(inc->fdPassMigrate), offset); } else { @@ -8531,7 +8550,7 @@ qemuProcessStartWithMemoryState(virConnectPtr conn, /* The fd passed to qemuProcessIncomingDefNew is used to create the migration * URI, so it must be called after starting the decompression program. 
*/ - incoming = qemuProcessIncomingDefNew(vm, NULL, "stdio", fd, path, data); + incoming = qemuProcessIncomingDefNew(driver, vm, NULL, "stdio", fd, path, data, migParams); if (!incoming) return -1; diff --git a/src/qemu/qemu_process.h b/src/qemu/qemu_process.h index c51335ad7a..8f7b3f24c4 100644 --- a/src/qemu/qemu_process.h +++ b/src/qemu/qemu_process.h @@ -57,12 +57,14 @@ struct _qemuProcessIncomingDef { const char *path; /* path associated with fd */ }; -qemuProcessIncomingDef *qemuProcessIncomingDefNew(virDomainObj *vm, +qemuProcessIncomingDef *qemuProcessIncomingDefNew(virQEMUDriver *driver, + virDomainObj *vm, const char *listenAddress, const char *migrateFrom, int *fd, const char *path, - virQEMUSaveData *data); + virQEMUSaveData *data, + qemuMigrationParams *migParams); void qemuProcessIncomingDefFree(qemuProcessIncomingDef *inc); -- 2.43.0

On Wed, Mar 05, 2025 at 03:48:23PM -0700, Jim Fehlig via Devel wrote:
When using the mapped-ram migration capability, direct IO is enabled by setting the "direct-io" migration parameter to "true" and passing QEMU an additional fd with O_DIRECT set.
Signed-off-by: Jim Fehlig <jfehlig@suse.com> --- src/qemu/qemu_migration.c | 11 ++++++----- src/qemu/qemu_process.c | 27 +++++++++++++++++++++++---- src/qemu/qemu_process.h | 6 ++++-- 3 files changed, 33 insertions(+), 11 deletions(-)
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com> With regards, Daniel -- |: https://berrange.com -o- https://www.flickr.com/photos/dberrange :| |: https://libvirt.org -o- https://fstop138.berrange.com :| |: https://entangle-photo.org -o- https://www.instagram.com/dberrange :|

From: Claudio Fontana <cfontana@suse.de> Add a new VIR_DOMAIN_SAVE_PARALLEL flag to the save and restore APIs, which can be used to specify the use of multiple, parallel channels for saving and restoring a domain. The number of parallel channels can be set using the VIR_DOMAIN_SAVE_PARAM_PARALLEL_CHANNELS typed parameter. Signed-off-by: Claudio Fontana <cfontana@suse.de> Signed-off-by: Jim Fehlig <jfehlig@suse.com> --- include/libvirt/libvirt-domain.h | 11 +++++++++++ src/libvirt-domain.c | 3 +++ 2 files changed, 14 insertions(+) diff --git a/include/libvirt/libvirt-domain.h b/include/libvirt/libvirt-domain.h index 0121620e9c..7f1a2003c0 100644 --- a/include/libvirt/libvirt-domain.h +++ b/include/libvirt/libvirt-domain.h @@ -1655,6 +1655,7 @@ typedef enum { VIR_DOMAIN_SAVE_RUNNING = 1 << 1, /* Favor running over paused (Since: 0.9.5) */ VIR_DOMAIN_SAVE_PAUSED = 1 << 2, /* Favor paused over running (Since: 0.9.5) */ VIR_DOMAIN_SAVE_RESET_NVRAM = 1 << 3, /* Re-initialize NVRAM from template (Since: 8.1.0) */ + VIR_DOMAIN_SAVE_PARALLEL = 1 << 4, /* Save and restore using parallel channels (Since: 10.6.0) */ } virDomainSaveRestoreFlags; int virDomainSave (virDomainPtr domain, @@ -1712,6 +1713,16 @@ int virDomainRestoreParams (virConnectPtr conn, */ # define VIR_DOMAIN_SAVE_PARAM_IMAGE_FORMAT "image_format" +/* + * VIR_DOMAIN_SAVE_PARAM_PARALLEL_CHANNELS: + * + * an optional parameter used to specify the number of IO channels to use + * during parallel save. As VIR_TYPED_PARAM_INT. 
+ * + * Since: 11.2.0 + */ +# define VIR_DOMAIN_SAVE_PARAM_PARALLEL_CHANNELS "parallel.channels" + /* See below for virDomainSaveImageXMLFlags */ char * virDomainSaveImageGetXMLDesc (virConnectPtr conn, diff --git a/src/libvirt-domain.c b/src/libvirt-domain.c index 0ee7c6f45b..d0c1b6c037 100644 --- a/src/libvirt-domain.c +++ b/src/libvirt-domain.c @@ -1225,6 +1225,9 @@ virDomainRestoreFlags(virConnectPtr conn, const char *from, const char *dxml, * now, VIR_DOMAIN_SAVE_PARAM_FILE is required but this requirement may * be lifted in the future. * + * See VIR_DOMAIN_SAVE_PARAM_* for detailed description of accepted + * restore parameters. + * * Returns 0 in case of success and -1 in case of failure. * * Since: 8.4.0 -- 2.43.0

On Wed, Mar 05, 2025 at 03:48:24PM -0700, Jim Fehlig via Devel wrote:
From: Claudio Fontana <cfontana@suse.de>
Add a new VIR_DOMAIN_SAVE_PARALLEL flag to the save and restore APIs, which can be used to specify the use of multiple, parallel channels for saving and restoring a domain. The number of parallel channels can be set using the VIR_DOMAIN_SAVE_PARAM_PARALLEL_CHANNELS typed parameter.
Signed-off-by: Claudio Fontana <cfontana@suse.de> Signed-off-by: Jim Fehlig <jfehlig@suse.com> --- include/libvirt/libvirt-domain.h | 11 +++++++++++ src/libvirt-domain.c | 3 +++ 2 files changed, 14 insertions(+)
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com> With regards, Daniel -- |: https://berrange.com -o- https://www.flickr.com/photos/dberrange :| |: https://libvirt.org -o- https://fstop138.berrange.com :| |: https://entangle-photo.org -o- https://www.instagram.com/dberrange :|

Add support for parallel save and restore by mapping libvirt's "parallel-channels" parameter to QEMU's "multifd-channels" migration parameter. Signed-off-by: Jim Fehlig <jfehlig@suse.com> --- src/qemu/qemu_driver.c | 32 +++++++++++++++++++++----------- src/qemu/qemu_migration_params.c | 31 +++++++++++++++++++++++++++++-- src/qemu/qemu_migration_params.h | 5 ++++- 3 files changed, 54 insertions(+), 14 deletions(-) diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c index e098a96a4e..bbabc714d3 100644 --- a/src/qemu/qemu_driver.c +++ b/src/qemu/qemu_driver.c @@ -2590,6 +2590,8 @@ qemuDomainSaveInternal(virQEMUDriver *driver, int format, virCommand *compressor, const char *xmlin, + virTypedParameterPtr params, + int nparams, unsigned int flags) { g_autofree char *xml = NULL; @@ -2679,7 +2681,8 @@ qemuDomainSaveInternal(virQEMUDriver *driver, goto endjob; xml = NULL; - if (!(saveParams = qemuMigrationParamsForSave(format == QEMU_SAVE_FORMAT_SPARSE, + if (!(saveParams = qemuMigrationParamsForSave(params, nparams, + format == QEMU_SAVE_FORMAT_SPARSE, flags))) goto endjob; @@ -2763,7 +2766,7 @@ qemuDomainManagedSaveHelper(virQEMUDriver *driver, VIR_INFO("Saving state of domain '%s' to '%s'", vm->def->name, path); if (qemuDomainSaveInternal(driver, vm, path, cfg->saveImageFormat, - compressor, dxml, flags) < 0) + compressor, dxml, NULL, 0, flags) < 0) return -1; vm->hasManagedSave = true; @@ -2799,7 +2802,7 @@ qemuDomainSaveFlags(virDomainPtr dom, const char *path, const char *dxml, goto cleanup; ret = qemuDomainSaveInternal(driver, vm, path, cfg->saveImageFormat, - compressor, dxml, flags); + compressor, dxml, NULL, 0, flags); cleanup: virDomainObjEndAPI(&vm); @@ -2830,7 +2833,8 @@ qemuDomainSaveParams(virDomainPtr dom, virCheckFlags(VIR_DOMAIN_SAVE_BYPASS_CACHE | VIR_DOMAIN_SAVE_RUNNING | - VIR_DOMAIN_SAVE_PAUSED, -1); + VIR_DOMAIN_SAVE_PAUSED | + VIR_DOMAIN_SAVE_PARALLEL, -1); if (virTypedParamsValidate(params, nparams, VIR_DOMAIN_SAVE_PARAM_FILE, @@ -2839,6 
+2843,8 @@ qemuDomainSaveParams(virDomainPtr dom, VIR_TYPED_PARAM_STRING, VIR_DOMAIN_SAVE_PARAM_IMAGE_FORMAT, VIR_TYPED_PARAM_STRING, + VIR_DOMAIN_SAVE_PARAM_PARALLEL_CHANNELS, + VIR_TYPED_PARAM_INT, NULL) < 0) return -1; @@ -2876,7 +2882,7 @@ qemuDomainSaveParams(virDomainPtr dom, goto cleanup; ret = qemuDomainSaveInternal(driver, vm, to, format, - compressor, dxml, flags); + compressor, dxml, params, nparams, flags); cleanup: virDomainObjEndAPI(&vm); @@ -5744,6 +5750,8 @@ static int qemuDomainRestoreInternal(virConnectPtr conn, const char *path, const char *dxml, + virTypedParameterPtr params, + int nparams, unsigned int flags, int (*ensureACL)(virConnectPtr, virDomainDef *)) { @@ -5765,7 +5773,8 @@ qemuDomainRestoreInternal(virConnectPtr conn, virCheckFlags(VIR_DOMAIN_SAVE_BYPASS_CACHE | VIR_DOMAIN_SAVE_RUNNING | VIR_DOMAIN_SAVE_PAUSED | - VIR_DOMAIN_SAVE_RESET_NVRAM, -1); + VIR_DOMAIN_SAVE_RESET_NVRAM | + VIR_DOMAIN_SAVE_PARALLEL, -1); if (flags & VIR_DOMAIN_SAVE_RESET_NVRAM) reset_nvram = true; @@ -5774,7 +5783,7 @@ qemuDomainRestoreInternal(virConnectPtr conn, goto cleanup; sparse = data->header.format == QEMU_SAVE_FORMAT_SPARSE; - if (!(restoreParams = qemuMigrationParamsForSave(sparse, flags))) + if (!(restoreParams = qemuMigrationParamsForSave(params, nparams, sparse, flags))) goto cleanup; fd = qemuSaveImageOpen(driver, path, @@ -5856,7 +5865,7 @@ qemuDomainRestoreFlags(virConnectPtr conn, const char *dxml, unsigned int flags) { - return qemuDomainRestoreInternal(conn, path, dxml, flags, + return qemuDomainRestoreInternal(conn, path, dxml, NULL, 0, flags, virDomainRestoreFlagsEnsureACL); } @@ -5864,7 +5873,7 @@ static int qemuDomainRestore(virConnectPtr conn, const char *path) { - return qemuDomainRestoreInternal(conn, path, NULL, 0, + return qemuDomainRestoreInternal(conn, path, NULL, NULL, 0, 0, virDomainRestoreEnsureACL); } @@ -5880,6 +5889,7 @@ qemuDomainRestoreParams(virConnectPtr conn, if (virTypedParamsValidate(params, nparams, 
VIR_DOMAIN_SAVE_PARAM_FILE, VIR_TYPED_PARAM_STRING, VIR_DOMAIN_SAVE_PARAM_DXML, VIR_TYPED_PARAM_STRING, + VIR_DOMAIN_SAVE_PARAM_PARALLEL_CHANNELS, VIR_TYPED_PARAM_INT, NULL) < 0) return -1; @@ -5896,7 +5906,7 @@ qemuDomainRestoreParams(virConnectPtr conn, return -1; } - ret = qemuDomainRestoreInternal(conn, path, dxml, flags, + ret = qemuDomainRestoreInternal(conn, path, dxml, params, nparams, flags, virDomainRestoreParamsEnsureACL); return ret; } @@ -6108,7 +6118,7 @@ qemuDomainObjRestore(virConnectPtr conn, } sparse = data->header.format == QEMU_SAVE_FORMAT_SPARSE; - if (!(restoreParams = qemuMigrationParamsForSave(sparse, + if (!(restoreParams = qemuMigrationParamsForSave(NULL, 0, sparse, bypass_cache ? VIR_DOMAIN_SAVE_BYPASS_CACHE : 0))) return -1; diff --git a/src/qemu/qemu_migration_params.c b/src/qemu/qemu_migration_params.c index 16e93f27d6..b696b0d13e 100644 --- a/src/qemu/qemu_migration_params.c +++ b/src/qemu/qemu_migration_params.c @@ -797,10 +797,19 @@ qemuMigrationParamsFromFlags(virTypedParameterPtr params, qemuMigrationParams * -qemuMigrationParamsForSave(bool sparse, unsigned int flags) +qemuMigrationParamsForSave(virTypedParameterPtr params, + int nparams, + bool sparse, + unsigned int flags) { g_autoptr(qemuMigrationParams) saveParams = NULL; + if (flags & VIR_DOMAIN_SAVE_PARALLEL && !sparse) { + virReportError(VIR_ERR_INVALID_ARG, "%s", + _("Parallel save is only supported with the 'sparse' save image format")); + return NULL; + } + if (!(saveParams = qemuMigrationParamsNew())) return NULL; @@ -809,7 +818,25 @@ qemuMigrationParamsForSave(bool sparse, unsigned int flags) return NULL; if (virBitmapSetBit(saveParams->caps, QEMU_MIGRATION_CAP_MULTIFD) < 0) return NULL; - saveParams->params[QEMU_MIGRATION_PARAM_MULTIFD_CHANNELS].value.i = 1; + + if (flags & VIR_DOMAIN_SAVE_PARALLEL) { + int nchannels; + + if (params && virTypedParamsGetInt(params, nparams, + VIR_DOMAIN_SAVE_PARAM_PARALLEL_CHANNELS, + &nchannels) < 0) + return NULL; + + if (nchannels 
< 1) { + virReportError(VIR_ERR_INVALID_ARG, "%s", + _("number of parallel save channels cannot be less than 1")); + return NULL; + } + + saveParams->params[QEMU_MIGRATION_PARAM_MULTIFD_CHANNELS].value.i = nchannels; + } else { + saveParams->params[QEMU_MIGRATION_PARAM_MULTIFD_CHANNELS].value.i = 1; + } saveParams->params[QEMU_MIGRATION_PARAM_MULTIFD_CHANNELS].set = true; if (flags & VIR_DOMAIN_SAVE_BYPASS_CACHE) { diff --git a/src/qemu/qemu_migration_params.h b/src/qemu/qemu_migration_params.h index 87822332d3..9d771d519d 100644 --- a/src/qemu/qemu_migration_params.h +++ b/src/qemu/qemu_migration_params.h @@ -89,7 +89,10 @@ qemuMigrationParamsFromFlags(virTypedParameterPtr params, qemuMigrationParty party); qemuMigrationParams * -qemuMigrationParamsForSave(bool sparse, unsigned int flags); +qemuMigrationParamsForSave(virTypedParameterPtr params, + int nparams, + bool sparse, + unsigned int flags); int qemuMigrationParamsDump(qemuMigrationParams *migParams, -- 2.43.0

On Wed, Mar 05, 2025 at 03:48:25PM -0700, Jim Fehlig via Devel wrote:
Add support for parallel save and restore by mapping libvirt's "parallel-channels" parameter to QEMU's "multifd-channels" migration parameter.
Signed-off-by: Jim Fehlig <jfehlig@suse.com> --- src/qemu/qemu_driver.c | 32 +++++++++++++++++++++----------- src/qemu/qemu_migration_params.c | 31 +++++++++++++++++++++++++++++-- src/qemu/qemu_migration_params.h | 5 ++++- 3 files changed, 54 insertions(+), 14 deletions(-)
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com> With regards, Daniel -- |: https://berrange.com -o- https://www.flickr.com/photos/dberrange :| |: https://libvirt.org -o- https://fstop138.berrange.com :| |: https://entangle-photo.org -o- https://www.instagram.com/dberrange :|

From: Claudio Fontana <cfontana@suse.de> Signed-off-by: Claudio Fontana <cfontana@suse.de> Signed-off-by: Jim Fehlig <jfehlig@suse.com> --- docs/manpages/virsh.rst | 12 +++++++++++- tools/virsh-domain.c | 42 +++++++++++++++++++++++++++++++++++++---- 2 files changed, 49 insertions(+), 5 deletions(-) diff --git a/docs/manpages/virsh.rst b/docs/manpages/virsh.rst index baced15dec..5d4a77bdf4 100644 --- a/docs/manpages/virsh.rst +++ b/docs/manpages/virsh.rst @@ -4015,6 +4015,7 @@ save :: save domain state-file [--bypass-cache] [--xml file] + [--parallel] [--parallel-channels channels] [{--running | --paused}] [--verbose] Saves a running domain (RAM, but not disk state) to a state file so that @@ -4022,8 +4023,11 @@ it can be restored later. Once saved, the domain will no longer be running on the system, thus the memory allocated for the domain will be free for other domains to use. ``virsh restore`` restores from this state file. + If *--bypass-cache* is specified, the save will avoid the file system -cache, although this may slow down the operation. +cache. Depending on the specific scenario this may slow down or speed up +the operation. + The progress may be monitored using ``domjobinfo`` virsh command and canceled with ``domjobabort`` command (sent by another virsh instance). Another option @@ -4045,6 +4049,12 @@ based on the state the domain was in when the save was done; passing either the *--running* or *--paused* flag will allow overriding which state the ``restore`` should use. +*--parallel* option will cause the save data to be written to file +over multiple parallel IO channels. The number of channels can be +specified using *--parallel-channels*. Using parallel IO channels +requires the use of ``sparse`` image save format. Parallel save may +significantly reduce the time required to save large memory domains. + Domain saved state files assume that disk images will be unchanged between the creation and restore point. 
For a more complete system restore point, where the disk state is saved alongside the memory diff --git a/tools/virsh-domain.c b/tools/virsh-domain.c index 93c34c4971..797df48d9d 100644 --- a/tools/virsh-domain.c +++ b/tools/virsh-domain.c @@ -4145,6 +4145,14 @@ static const vshCmdOptDef opts_save[] = { .type = VSH_OT_BOOL, .help = N_("avoid file system cache when saving") }, + {.name = "parallel", + .type = VSH_OT_BOOL, + .help = N_("enable parallel save") + }, + {.name = "parallel-channels", + .type = VSH_OT_INT, + .help = N_("number of extra IO channels to use for parallel save") + }, {.name = "xml", .type = VSH_OT_STRING, .unwanted_positional = true, @@ -4175,6 +4183,11 @@ doSave(void *opaque) g_autoptr(virshDomain) dom = NULL; const char *name = NULL; const char *to = NULL; + virTypedParameterPtr params = NULL; + int nparams = 0; + int maxparams = 0; + int nchannels = 1; + int rv = -1; unsigned int flags = 0; const char *xmlfile = NULL; g_autofree char *xml = NULL; @@ -4188,15 +4201,30 @@ doSave(void *opaque) goto out_sig; #endif /* !WIN32 */ - if (vshCommandOptString(ctl, cmd, "file", &to) < 0) - goto out; - if (vshCommandOptBool(cmd, "bypass-cache")) flags |= VIR_DOMAIN_SAVE_BYPASS_CACHE; if (vshCommandOptBool(cmd, "running")) flags |= VIR_DOMAIN_SAVE_RUNNING; if (vshCommandOptBool(cmd, "paused")) flags |= VIR_DOMAIN_SAVE_PAUSED; + if (vshCommandOptBool(cmd, "parallel")) + flags |= VIR_DOMAIN_SAVE_PARALLEL; + + if (vshCommandOptString(ctl, cmd, "file", &to) < 0) + goto out; + if (to && + virTypedParamsAddString(¶ms, &nparams, &maxparams, + VIR_DOMAIN_SAVE_PARAM_FILE, to) < 0) + goto out; + + if (flags & VIR_DOMAIN_SAVE_PARALLEL) { + if ((rv = vshCommandOptInt(ctl, cmd, "parallel-channels", &nchannels)) < 0) + goto out; + + if (virTypedParamsAddInt(¶ms, &nparams, &maxparams, + VIR_DOMAIN_SAVE_PARAM_PARALLEL_CHANNELS, nchannels) < 0) + goto out; + } if (vshCommandOptString(ctl, cmd, "xml", &xmlfile) < 0) goto out; @@ -4209,9 +4237,13 @@ doSave(void *opaque) 
vshReportError(ctl); goto out; } + if (xml && + virTypedParamsAddString(¶ms, &nparams, &maxparams, + VIR_DOMAIN_SAVE_PARAM_DXML, xml) < 0) + goto out; if (flags || xml) { - rc = virDomainSaveFlags(dom, to, xml, flags); + rc = virDomainSaveParams(dom, params, nparams, flags); } else { rc = virDomainSave(dom, to); } @@ -4224,6 +4256,8 @@ doSave(void *opaque) data->ret = 0; out: + virTypedParamsFree(params, nparams); + #ifndef WIN32 pthread_sigmask(SIG_SETMASK, &oldsigmask, NULL); out_sig: -- 2.43.0

On Wed, Mar 05, 2025 at 03:48:26PM -0700, Jim Fehlig via Devel wrote:
From: Claudio Fontana <cfontana@suse.de>
Signed-off-by: Claudio Fontana <cfontana@suse.de> Signed-off-by: Jim Fehlig <jfehlig@suse.com> --- docs/manpages/virsh.rst | 12 +++++++++++- tools/virsh-domain.c | 42 +++++++++++++++++++++++++++++++++++++---- 2 files changed, 49 insertions(+), 5 deletions(-)
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com> With regards, Daniel -- |: https://berrange.com -o- https://www.flickr.com/photos/dberrange :| |: https://libvirt.org -o- https://fstop138.berrange.com :| |: https://entangle-photo.org -o- https://www.instagram.com/dberrange :|

From: Claudio Fontana <cfontana@suse.de> Signed-off-by: Claudio Fontana <cfontana@suse.de> Signed-off-by: Jim Fehlig <jfehlig@suse.com> --- docs/manpages/virsh.rst | 9 +++++++-- tools/virsh-domain.c | 39 +++++++++++++++++++++++++++++++++++---- 2 files changed, 42 insertions(+), 6 deletions(-) diff --git a/docs/manpages/virsh.rst b/docs/manpages/virsh.rst index 5d4a77bdf4..40a13993ed 100644 --- a/docs/manpages/virsh.rst +++ b/docs/manpages/virsh.rst @@ -3966,12 +3966,13 @@ restore :: restore state-file [--bypass-cache] [--xml file] - [{--running | --paused}] [--reset-nvram] + [{--running | --paused}] [--reset-nvram] [--parallel] [--parallel-channels] Restores a domain from a ``virsh save`` state file. See *save* for more info. If *--bypass-cache* is specified, the restore will avoid the file system -cache, although this may slow down the operation. +cache. Depending on the specific scenario this may slow down or speed up +the operation. *--xml* ``file`` is usually omitted, but can be used to supply an alternative XML file for use on the restored guest with changes only @@ -3987,6 +3988,10 @@ domain should be started in. If *--reset-nvram* is specified, any existing NVRAM file will be deleted and re-initialized from its pristine template. +*--parallel* option will cause the save data to be loaded using the number +of parallel IO channels specified with *--parallel-channels*. Parallel +channels will help speed up large restore operations. 
+ ``Note``: To avoid corrupting file system contents within the domain, you should not reuse the saved state file for a second ``restore`` unless you have also reverted all storage volumes back to the same contents as when diff --git a/tools/virsh-domain.c b/tools/virsh-domain.c index 797df48d9d..acc3064983 100644 --- a/tools/virsh-domain.c +++ b/tools/virsh-domain.c @@ -5281,6 +5281,14 @@ static const vshCmdOptDef opts_restore[] = { .type = VSH_OT_BOOL, .help = N_("avoid file system cache when restoring") }, + {.name = "parallel", + .type = VSH_OT_BOOL, + .help = N_("enable parallel restore") + }, + {.name = "parallel-channels", + .type = VSH_OT_INT, + .help = N_("number of IO channels to use for parallel restore") + }, {.name = "xml", .type = VSH_OT_STRING, .unwanted_positional = true, @@ -5310,13 +5318,16 @@ cmdRestore(vshControl *ctl, const vshCmd *cmd) const char *xmlfile = NULL; g_autofree char *xml = NULL; virshControl *priv = ctl->privData; + virTypedParameterPtr params = NULL; + int nparams = 0; + int maxparams = 0; + int nchannels = 1; int rc; - if (vshCommandOptString(ctl, cmd, "file", &from) < 0) - return false; - if (vshCommandOptBool(cmd, "bypass-cache")) flags |= VIR_DOMAIN_SAVE_BYPASS_CACHE; + if (vshCommandOptBool(cmd, "parallel")) + flags |= VIR_DOMAIN_SAVE_PARALLEL; if (vshCommandOptBool(cmd, "running")) flags |= VIR_DOMAIN_SAVE_RUNNING; if (vshCommandOptBool(cmd, "paused")) @@ -5324,15 +5335,35 @@ cmdRestore(vshControl *ctl, const vshCmd *cmd) if (vshCommandOptBool(cmd, "reset-nvram")) flags |= VIR_DOMAIN_SAVE_RESET_NVRAM; + if (vshCommandOptString(ctl, cmd, "file", &from) < 0) + return false; + if (from && + virTypedParamsAddString(¶ms, &nparams, &maxparams, + VIR_DOMAIN_SAVE_PARAM_FILE, from) < 0) + return false; + if (vshCommandOptString(ctl, cmd, "xml", &xmlfile) < 0) return false; if (xmlfile && virFileReadAll(xmlfile, VSH_MAX_XML_FILE, &xml) < 0) return false; + if (xml && + virTypedParamsAddString(¶ms, &nparams, &maxparams, + 
VIR_DOMAIN_SAVE_PARAM_DXML, xml) < 0) + return false; + + if (flags & VIR_DOMAIN_SAVE_PARALLEL) { + if ((rc = vshCommandOptInt(ctl, cmd, "parallel-channels", &nchannels)) < 0) + return false; + + if (virTypedParamsAddInt(¶ms, &nparams, &maxparams, + VIR_DOMAIN_SAVE_PARAM_PARALLEL_CHANNELS, nchannels) < 0) + return false; + } if (flags || xml) { - rc = virDomainRestoreFlags(priv->conn, from, xml, flags); + rc = virDomainRestoreParams(priv->conn, params, nparams, flags); } else { rc = virDomainRestore(priv->conn, from); } -- 2.43.0

On Wed, Mar 05, 2025 at 03:48:27PM -0700, Jim Fehlig via Devel wrote:
From: Claudio Fontana <cfontana@suse.de>
Signed-off-by: Claudio Fontana <cfontana@suse.de> Signed-off-by: Jim Fehlig <jfehlig@suse.com> --- docs/manpages/virsh.rst | 9 +++++++-- tools/virsh-domain.c | 39 +++++++++++++++++++++++++++++++++++---- 2 files changed, 42 insertions(+), 6 deletions(-)
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com> With regards, Daniel -- |: https://berrange.com -o- https://www.flickr.com/photos/dberrange :| |: https://libvirt.org -o- https://fstop138.berrange.com :| |: https://entangle-photo.org -o- https://www.instagram.com/dberrange :|

Hi All, Does anyone have time to review this series? QEMU has supported mapped-ram for a few releases now. Adding support in libvirt allows us to build parallel save/restore on top. Would be a nice feature for the next release :-). Regards, Jim On 3/5/25 15:48, Jim Fehlig wrote:
V4 series adding support for QEMU's mapped-ram stream format [1] and migration capability. The use of mapped-ram is controlled by extending save_image_format setting in qemu.conf with a new 'sparse' option. The 'raw' format continues to be the default.
Also included are patches that leverage mapped-ram to add support for parallel save/restore.
Changes in V4: * Rebased on latest master, including the series "qemu: Support specifying save image format"
V1: https://lists.libvirt.org/archives/list/devel@lists.libvirt.org/message/MNBH...
V2: https://lists.libvirt.org/archives/list/devel@lists.libvirt.org/message/GNJN...
V3: https://lists.libvirt.org/archives/list/devel@lists.libvirt.org/message/SK2P...
[1] https://gitlab.com/qemu-project/qemu/-/blob/master/docs/devel/migration/mapp...
Claudio Fontana (3): include: Define constants for parallel save/restore tools: add parallel parameter to virsh save command tools: add parallel parameter to virsh restore command
Jim Fehlig (15): lib: virDomain{Save,Restore}Params: Ensure absolute path qemu: Add function to get FDPass object from monitor qemu: Add function to check capability in migration params qemu: Add function to get bool value from migration params qemu: Add mapped-ram migration capability qemu: Add function to get migration params for save qemu_saveimage: add "sparse" to supported save image formats qemu: Add helper function for creating save image fd qemu: Add support for mapped-ram on save qemu: Move creation of qemuProcessIncomingDef struct qemu: Apply migration parameters in qemuMigrationDstRun qemu: Add support for mapped-ram on restore qemu: Support O_DIRECT with mapped-ram on save qemu: Support O_DIRECT with mapped-ram on restore qemu: Add support for parallel save and restore
docs/manpages/virsh.rst | 21 +++- include/libvirt/libvirt-domain.h | 11 ++ src/libvirt-domain.c | 92 +++++++++++--- src/qemu/qemu.conf.in | 9 +- src/qemu/qemu_driver.c | 78 +++++++++--- src/qemu/qemu_fd.c | 46 +++++++ src/qemu/qemu_fd.h | 4 + src/qemu/qemu_migration.c | 204 ++++++++++++++++++++++--------- src/qemu/qemu_migration.h | 10 +- src/qemu/qemu_migration_params.c | 92 ++++++++++++++ src/qemu/qemu_migration_params.h | 17 +++ src/qemu/qemu_monitor.c | 37 ++++++ src/qemu/qemu_monitor.h | 5 + src/qemu/qemu_process.c | 100 ++++++++++----- src/qemu/qemu_process.h | 19 ++- src/qemu/qemu_saveimage.c | 120 ++++++++++++------ src/qemu/qemu_saveimage.h | 4 + src/qemu/qemu_snapshot.c | 15 ++- tools/virsh-domain.c | 81 ++++++++++-- 19 files changed, 775 insertions(+), 190 deletions(-)

Hi Jim, There are a couple of CI failures after this, one on 32-bit and one with Clang On Wed, Mar 05, 2025 at 03:48:09PM -0700, Jim Fehlig via Devel wrote:
V4 series adding support for QEMU's mapped-ram stream format [1] and migration capability. The use of mapped-ram is controlled by extending save_image_format setting in qemu.conf with a new 'sparse' option. The 'raw' format continues to be the default.
Also included are patches that leverage mapped-ram to add support for parallel save/restore.
Changes in V4: * Rebased on latest master, including the series "qemu: Support specifying save image format"
V1: https://lists.libvirt.org/archives/list/devel@lists.libvirt.org/message/MNBH...
V2: https://lists.libvirt.org/archives/list/devel@lists.libvirt.org/message/GNJN...
V3: https://lists.libvirt.org/archives/list/devel@lists.libvirt.org/message/SK2P...
[1] https://gitlab.com/qemu-project/qemu/-/blob/master/docs/devel/migration/mapp...
Claudio Fontana (3): include: Define constants for parallel save/restore tools: add parallel parameter to virsh save command tools: add parallel parameter to virsh restore command
Jim Fehlig (15): lib: virDomain{Save,Restore}Params: Ensure absolute path qemu: Add function to get FDPass object from monitor qemu: Add function to check capability in migration params qemu: Add function to get bool value from migration params qemu: Add mapped-ram migration capability qemu: Add function to get migration params for save qemu_saveimage: add "sparse" to supported save image formats qemu: Add helper function for creating save image fd qemu: Add support for mapped-ram on save qemu: Move creation of qemuProcessIncomingDef struct qemu: Apply migration parameters in qemuMigrationDstRun qemu: Add support for mapped-ram on restore qemu: Support O_DIRECT with mapped-ram on save qemu: Support O_DIRECT with mapped-ram on restore qemu: Add support for parallel save and restore
docs/manpages/virsh.rst | 21 +++- include/libvirt/libvirt-domain.h | 11 ++ src/libvirt-domain.c | 92 +++++++++++--- src/qemu/qemu.conf.in | 9 +- src/qemu/qemu_driver.c | 78 +++++++++--- src/qemu/qemu_fd.c | 46 +++++++ src/qemu/qemu_fd.h | 4 + src/qemu/qemu_migration.c | 204 ++++++++++++++++++++++--------- src/qemu/qemu_migration.h | 10 +- src/qemu/qemu_migration_params.c | 92 ++++++++++++++ src/qemu/qemu_migration_params.h | 17 +++ src/qemu/qemu_monitor.c | 37 ++++++ src/qemu/qemu_monitor.h | 5 + src/qemu/qemu_process.c | 100 ++++++++++----- src/qemu/qemu_process.h | 19 ++- src/qemu/qemu_saveimage.c | 120 ++++++++++++------ src/qemu/qemu_saveimage.h | 4 + src/qemu/qemu_snapshot.c | 15 ++- tools/virsh-domain.c | 81 ++++++++++-- 19 files changed, 775 insertions(+), 190 deletions(-)
-- 2.43.0
With regards, Daniel -- |: https://berrange.com -o- https://www.flickr.com/photos/dberrange :| |: https://libvirt.org -o- https://fstop138.berrange.com :| |: https://entangle-photo.org -o- https://www.instagram.com/dberrange :|

On 3/20/25 11:55, Daniel P. Berrangé wrote:
Hi Jim,
There are a couple of CI failures after this, one on 32-bit and one with Clang
Yep, received mail from gitlab as well. Will take a look... Regards, Jim
On Wed, Mar 05, 2025 at 03:48:09PM -0700, Jim Fehlig via Devel wrote:
V4 series adding support for QEMU's mapped-ram stream format [1] and migration capability. The use of mapped-ram is controlled by extending save_image_format setting in qemu.conf with a new 'sparse' option. The 'raw' format continues to be the default.
Also included are patches that leverage mapped-ram to add support for parallel save/restore.
Changes in V4: * Rebased on latest master, including the series "qemu: Support specifying save image format"
V1: https://lists.libvirt.org/archives/list/devel@lists.libvirt.org/message/MNBH...
V2: https://lists.libvirt.org/archives/list/devel@lists.libvirt.org/message/GNJN...
V3: https://lists.libvirt.org/archives/list/devel@lists.libvirt.org/message/SK2P...
[1] https://gitlab.com/qemu-project/qemu/-/blob/master/docs/devel/migration/mapp...
Claudio Fontana (3): include: Define constants for parallel save/restore tools: add parallel parameter to virsh save command tools: add parallel parameter to virsh restore command
Jim Fehlig (15): lib: virDomain{Save,Restore}Params: Ensure absolute path qemu: Add function to get FDPass object from monitor qemu: Add function to check capability in migration params qemu: Add function to get bool value from migration params qemu: Add mapped-ram migration capability qemu: Add function to get migration params for save qemu_saveimage: add "sparse" to supported save image formats qemu: Add helper function for creating save image fd qemu: Add support for mapped-ram on save qemu: Move creation of qemuProcessIncomingDef struct qemu: Apply migration parameters in qemuMigrationDstRun qemu: Add support for mapped-ram on restore qemu: Support O_DIRECT with mapped-ram on save qemu: Support O_DIRECT with mapped-ram on restore qemu: Add support for parallel save and restore
docs/manpages/virsh.rst | 21 +++- include/libvirt/libvirt-domain.h | 11 ++ src/libvirt-domain.c | 92 +++++++++++--- src/qemu/qemu.conf.in | 9 +- src/qemu/qemu_driver.c | 78 +++++++++--- src/qemu/qemu_fd.c | 46 +++++++ src/qemu/qemu_fd.h | 4 + src/qemu/qemu_migration.c | 204 ++++++++++++++++++++++--------- src/qemu/qemu_migration.h | 10 +- src/qemu/qemu_migration_params.c | 92 ++++++++++++++ src/qemu/qemu_migration_params.h | 17 +++ src/qemu/qemu_monitor.c | 37 ++++++ src/qemu/qemu_monitor.h | 5 + src/qemu/qemu_process.c | 100 ++++++++++----- src/qemu/qemu_process.h | 19 ++- src/qemu/qemu_saveimage.c | 120 ++++++++++++------ src/qemu/qemu_saveimage.h | 4 + src/qemu/qemu_snapshot.c | 15 ++- tools/virsh-domain.c | 81 ++++++++++-- 19 files changed, 775 insertions(+), 190 deletions(-)
-- 2.43.0
With regards, Daniel
participants (3)
-
Daniel P. Berrangé
-
Jim Fehlig
-
Martin Kletzander