On Thu, Jun 13, 2024 at 04:43:21PM -0600, Jim Fehlig via Devel wrote:
Signed-off-by: Jim Fehlig <jfehlig(a)suse.com>
---
src/qemu/qemu_migration.c | 19 +++++++++++----
src/qemu/qemu_migration.h | 3 ++-
src/qemu/qemu_process.c | 49 ++++++++++++++++++++++++++++-----------
src/qemu/qemu_process.h | 13 +++++++----
src/qemu/qemu_saveimage.c | 26 ++++++++++++++-------
5 files changed, 76 insertions(+), 34 deletions(-)
diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
index 7ef7040a85..f700390a8c 100644
--- a/src/qemu/qemu_process.c
+++ b/src/qemu/qemu_process.c
@@ -7717,8 +7733,12 @@ qemuProcessLaunch(virConnectPtr conn,
&nnicindexes, &nicindexes)))
goto cleanup;
- if (incoming && incoming->fd != -1)
+ if (incoming && incoming->fd != -1) {
virCommandPassFD(cmd, incoming->fd, 0);
+ if (incoming->fdPassMigrate != NULL) {
+ qemuFDPassTransferCommand(incoming->fdPassMigrate, cmd);
No need for this last condition; qemuFDPassTransferCommand is a no-op
when the first parameter is NULL, so the block can be simplified as
sketched below.
+ }
+ }
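
Something like this should suffice (untested):

    if (incoming && incoming->fd != -1) {
        virCommandPassFD(cmd, incoming->fd, 0);
        qemuFDPassTransferCommand(incoming->fdPassMigrate, cmd);
    }
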
/* now that we know it is about to start call the hook if present */
if (qemuProcessStartHook(driver, vm,
@@ -8153,7 +8173,8 @@ qemuProcessStart(virConnectPtr conn,
VIR_QEMU_PROCESS_START_PAUSED |
VIR_QEMU_PROCESS_START_AUTODESTROY |
VIR_QEMU_PROCESS_START_GEN_VMID |
- VIR_QEMU_PROCESS_START_RESET_NVRAM, cleanup);
+ VIR_QEMU_PROCESS_START_RESET_NVRAM |
+ VIR_QEMU_PROCESS_START_MAPPED_RAM, cleanup);
if (!migrateFrom && !snapshot)
flags |= VIR_QEMU_PROCESS_START_NEW;
@@ -8163,8 +8184,8 @@ qemuProcessStart(virConnectPtr conn,
goto cleanup;
if (migrateFrom) {
- incoming = qemuProcessIncomingDefNew(priv->qemuCaps, NULL, migrateFrom,
- migrateFd, migratePath);
+ incoming = qemuProcessIncomingDefNew(vm, NULL, migrateFrom,
+ migrateFd, migratePath, flags);
if (!incoming)
goto stop;
}
@@ -8191,7 +8212,7 @@ qemuProcessStart(virConnectPtr conn,
relabel = true;
if (incoming) {
- if (qemuMigrationDstRun(vm, incoming->uri, asyncJob) < 0)
+ if (qemuMigrationDstRun(vm, incoming->uri, asyncJob, flags) < 0)
goto stop;
} else {
/* Refresh state of devices from QEMU. During migration this happens
diff --git a/src/qemu/qemu_process.h b/src/qemu/qemu_process.h
index c1ea949215..a5212ee56e 100644
--- a/src/qemu/qemu_process.h
+++ b/src/qemu/qemu_process.h
@@ -54,14 +54,16 @@ struct _qemuProcessIncomingDef {
char *address; /* address where QEMU is supposed to listen */
char *uri; /* used when calling migrate-incoming QMP command */
int fd; /* for fd:N URI */
+ qemuFDPass *fdPassMigrate; /* for file:/dev/fdset/n,offset=x URI */
const char *path; /* path associated with fd */
};
-qemuProcessIncomingDef *qemuProcessIncomingDefNew(virQEMUCaps *qemuCaps,
- const char *listenAddress,
- const char *migrateFrom,
- int fd,
- const char *path);
+qemuProcessIncomingDef *qemuProcessIncomingDefNew(virDomainObj *vm,
+ const char *listenAddress,
+ const char *migrateFrom,
+ int fd,
+ const char *path,
+ unsigned int flags);
void qemuProcessIncomingDefFree(qemuProcessIncomingDef *inc);
int qemuProcessBeginJob(virDomainObj *vm,
@@ -77,6 +79,7 @@ typedef enum {
VIR_QEMU_PROCESS_START_NEW = 1 << 4, /* internal, new VM is starting */
VIR_QEMU_PROCESS_START_GEN_VMID = 1 << 5, /* Generate a new VMID */
VIR_QEMU_PROCESS_START_RESET_NVRAM = 1 << 6, /* Re-initialize NVRAM from template */
+ VIR_QEMU_PROCESS_START_MAPPED_RAM = 1 << 7, /* Re-initialize NVRAM from template */
copy paste error in comment ;)
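
Presumably something like this was intended:

    VIR_QEMU_PROCESS_START_MAPPED_RAM = 1 << 7, /* Use QEMU's mapped-ram capability */
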
} qemuProcessStartFlags;
int qemuProcessStart(virConnectPtr conn,
diff --git a/src/qemu/qemu_saveimage.c b/src/qemu/qemu_saveimage.c
index 8f28770086..1545c00472 100644
--- a/src/qemu/qemu_saveimage.c
+++ b/src/qemu/qemu_saveimage.c
@@ -628,6 +628,7 @@ qemuSaveImageOpen(virQEMUDriver *driver,
int oflags = open_write ? O_RDWR : O_RDONLY;
size_t xml_len;
size_t cookie_len;
+ bool use_mapped_ram = false;
if (bypass_cache) {
int directFlag = virFileDirectFdFlag();
@@ -642,11 +643,6 @@ qemuSaveImageOpen(virQEMUDriver *driver,
if ((fd = qemuDomainOpenFile(cfg, NULL, path, oflags, NULL)) < 0)
return -1;
- if (bypass_cache &&
- !(*wrapperFd = virFileWrapperFdNew(&fd, path,
- VIR_FILE_WRAPPER_BYPASS_CACHE)))
- return -1;
-
data = g_new0(virQEMUSaveData, 1);
header = &data->header;
@@ -708,10 +704,14 @@ qemuSaveImageOpen(virQEMUDriver *driver,
return -1;
}
- if (header->features && header->features != QEMU_SAVE_FEATURE_MAPPED_RAM) {
- virReportError(VIR_ERR_OPERATION_FAILED, "%s",
- _("image contains unsupported features)"));
- return -1;
+ if (header->features) {
+ if (header->features == QEMU_SAVE_FEATURE_MAPPED_RAM) {
+ use_mapped_ram = true;
+ } else {
+ virReportError(VIR_ERR_OPERATION_FAILED, "%s",
+ _("image contains unsupported features)"));
+ return -1;
+ }
}
if (header->cookieOffset)
@@ -739,6 +739,11 @@ qemuSaveImageOpen(virQEMUDriver *driver,
}
}
+ if (bypass_cache && !use_mapped_ram &&
+ !(*wrapperFd = virFileWrapperFdNew(&fd, path,
+ VIR_FILE_WRAPPER_BYPASS_CACHE)))
+ return -1;
+
Shouldn't this be:

    if (bypass_cache) {
        if (use_mapped_ram) {
            /* error out */
            return -1;
        }
        if (!(*wrapperFd = virFileWrapperFdNew(&fd, path,
                                               VIR_FILE_WRAPPER_BYPASS_CACHE)))
            return -1;
    }

??
But only until the next patches, which add the possibility of
bypass_cache + mapped_ram. Or maybe this should error out if QEMU does
not support mapped_ram with direct-io.
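
If the latter, roughly something like this (untested; QEMU_CAPS_MAPPED_RAM
is just a placeholder for whatever capability flag ends up representing
mapped-ram + direct-io support):

    if (bypass_cache && use_mapped_ram &&
        !virQEMUCapsGet(qemuCaps, QEMU_CAPS_MAPPED_RAM)) { /* placeholder cap */
        virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
                       _("QEMU does not support mapped-ram with direct-io"));
        return -1;
    }
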
/* Create a domain from this XML */
if (!(def = virDomainDefParseString(data->xml, driver->xmlopt, qemuCaps,
VIR_DOMAIN_DEF_PARSE_INACTIVE |
@@ -777,6 +782,9 @@ qemuSaveImageStartVM(virConnectPtr conn,
if (reset_nvram)
start_flags |= VIR_QEMU_PROCESS_START_RESET_NVRAM;
+ if (header->features & QEMU_SAVE_FEATURE_MAPPED_RAM)
+ start_flags |= VIR_QEMU_PROCESS_START_MAPPED_RAM;
+
if (qemuProcessStartWithMemoryState(conn, driver, vm, fd, path, NULL, data,
asyncJob, start_flags, "restored",
&started) < 0) {
--
2.44.0