Use multifd to restore parallel images if VIR_DOMAIN_SAVE_PARALLEL is enabled.
Signed-off-by: Claudio Fontana <cfontana(a)suse.de>
---
src/qemu/qemu_driver.c | 16 +++++-
src/qemu/qemu_migration.c | 2 +-
src/qemu/qemu_migration.h | 6 ++
src/qemu/qemu_saveimage.c | 114 +++++++++++++++++++++++++++++++++++++-
src/qemu/qemu_saveimage.h | 8 ++-
5 files changed, 139 insertions(+), 7 deletions(-)
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 3d2b5d78e6..c3b2afe0d8 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -5883,7 +5883,7 @@ qemuDomainRestoreInternal(virConnectPtr conn,
}
oflags |= O_DIRECT;
}
- if (virQEMUSaveFdInit(&saveFd, path, oflags, cfg, false) < 0)
+    if (virQEMUSaveFdInit(&saveFd, path, oflags, cfg, flags & VIR_DOMAIN_SAVE_PARALLEL) < 0)
return -1;
if (qemuSaveImageOpen(driver, NULL, &def, &data, false, &saveFd) < 0)
goto cleanup;
@@ -5939,8 +5939,18 @@ qemuDomainRestoreInternal(virConnectPtr conn,
flags) < 0)
goto cleanup;
- ret = qemuSaveImageStartVM(conn, driver, vm, &saveFd.fd, data, path,
- false, reset_nvram, true, VIR_ASYNC_JOB_START);
+ if (flags & VIR_DOMAIN_SAVE_PARALLEL) {
+ ret = qemuSaveImageLoadMultiFd(conn, vm, data, reset_nvram,
+ &saveFd, VIR_ASYNC_JOB_START);
+
+ } else if (data->header.multifd_channels != 0) {
+ virReportError(VIR_ERR_OPERATION_FAILED, "%s",
+                       _("save file seems to contain multifd channels information, and restore is not flagged as 'parallel'"));
+ ret = -1;
+ } else {
+ ret = qemuSaveImageStartVM(conn, driver, vm, &saveFd.fd, data, path,
+ false, reset_nvram, true, VIR_ASYNC_JOB_START);
+ }
qemuProcessEndJob(vm);
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index 439dd7478d..c214f88dd1 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -1920,7 +1920,7 @@ qemuMigrationSrcWaitForCompletion(virQEMUDriver *driver,
}
-static int
+int
qemuMigrationDstWaitForCompletion(virQEMUDriver *driver,
virDomainObj *vm,
virDomainAsyncJob asyncJob,
diff --git a/src/qemu/qemu_migration.h b/src/qemu/qemu_migration.h
index c3c48c19c0..38f4877cf0 100644
--- a/src/qemu/qemu_migration.h
+++ b/src/qemu/qemu_migration.h
@@ -191,6 +191,12 @@ qemuMigrationDstFinish(virQEMUDriver *driver,
int retcode,
bool v3proto);
+int
+qemuMigrationDstWaitForCompletion(virQEMUDriver *driver,
+ virDomainObj *vm,
+ virDomainAsyncJob asyncJob,
+ bool postcopy);
+
int
qemuMigrationSrcConfirm(virQEMUDriver *driver,
virDomainObj *vm,
diff --git a/src/qemu/qemu_saveimage.c b/src/qemu/qemu_saveimage.c
index 9c86e20f30..2ed02d31de 100644
--- a/src/qemu/qemu_saveimage.c
+++ b/src/qemu/qemu_saveimage.c
@@ -643,6 +643,105 @@ qemuSaveImageCreate(virQEMUDriver *driver,
}
+int qemuSaveImageLoadMultiFd(virConnectPtr conn, virDomainObj *vm,
+ virQEMUSaveData *data, bool reset_nvram,
+ virQEMUSaveFd *saveFd, virDomainAsyncJob asyncJob)
+{
+ virQEMUDriver *driver = conn->privateData;
+ qemuDomainObjPrivate *priv = vm->privateData;
+ g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
+ g_autoptr(virCommand) cmd = NULL;
+ g_autofree char *helper_path = NULL;
+    g_autofree char *sun_path = g_strdup_printf("%s/restore-multifd.sock", cfg->saveDir);
+ bool qemu_started = false;
+ int ret = -1;
+
+ if (!(helper_path = virFileFindResource("libvirt_multifd_helper",
+ abs_top_builddir "/src",
+ LIBEXECDIR)))
+ goto cleanup;
+ cmd = virCommandNewArgList(helper_path, sun_path, saveFd->path, NULL);
+ virCommandAddArgFormat(cmd, "%d", saveFd->nchannels);
+ virCommandAddArgFormat(cmd, "%d", saveFd->fd);
+ virCommandPassFD(cmd, saveFd->fd, 0);
+
+ if (qemuSaveImageStartVM(conn, driver, vm, NULL, data, sun_path,
+ false, reset_nvram, false, asyncJob) < 0)
+ goto cleanup;
+
+ qemu_started = true;
+
+ if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_MULTIFD)) {
+ virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
+ _("QEMU does not seem to support multifd migration"));
+ goto cleanup;
+ } else {
+ g_autoptr(qemuMigrationParams) migParams = qemuMigrationParamsNew();
+        bool bwParam = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_PARAM_BANDWIDTH);
+
+ if (bwParam) {
+ if (qemuMigrationParamsSetULL(migParams,
+ QEMU_MIGRATION_PARAM_MAX_BANDWIDTH,
+                                          QEMU_DOMAIN_MIG_BANDWIDTH_MAX * 1024 * 1024) < 0)
+ goto cleanup;
+ priv->migMaxBandwidth = QEMU_DOMAIN_MIG_BANDWIDTH_MAX;
+ } else {
+ if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) == 0) {
+ qemuMonitorSetMigrationSpeed(priv->mon,
+ QEMU_DOMAIN_MIG_BANDWIDTH_MAX);
+ priv->migMaxBandwidth = QEMU_DOMAIN_MIG_BANDWIDTH_MAX;
+ qemuDomainObjExitMonitor(vm);
+ }
+ }
+ qemuMigrationParamsSetCap(migParams, QEMU_MIGRATION_CAP_MULTIFD);
+ if (qemuMigrationParamsSetInt(migParams,
+ QEMU_MIGRATION_PARAM_MULTIFD_CHANNELS,
+ saveFd->nchannels) < 0)
+ goto cleanup;
+ if (qemuMigrationParamsApply(driver, vm, asyncJob, migParams) < 0)
+ goto cleanup;
+
+ if (!virDomainObjIsActive(vm)) {
+ virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+ _("guest unexpectedly quit"));
+ goto cleanup;
+ }
+ /* multifd helper can now connect, then wait for migration to complete */
+ if (virCommandRunAsync(cmd, NULL) < 0)
+ goto cleanup;
+
+ if (qemuMigrationDstWaitForCompletion(driver, vm, asyncJob, false) < 0)
+ goto cleanup;
+
+ if (qemuProcessRefreshState(driver, vm, asyncJob) < 0)
+ goto cleanup;
+
+ /* run 'cont' on the destination */
+ if (qemuProcessStartCPUs(driver, vm,
+ VIR_DOMAIN_RUNNING_RESTORED,
+ asyncJob) < 0) {
+ if (virGetLastErrorCode() == VIR_ERR_OK)
+ virReportError(VIR_ERR_OPERATION_FAILED,
+ "%s", _("failed to resume domain"));
+ goto cleanup;
+ }
+ if (virDomainObjSave(vm, driver->xmlopt, cfg->stateDir) < 0) {
+ VIR_WARN("Failed to save status on vm %s", vm->def->name);
+ goto cleanup;
+ }
+ }
+ qemuDomainEventEmitJobCompleted(driver, vm);
+ ret = 0;
+
+ cleanup:
+ if (ret < 0 && qemu_started) {
+ qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
+ asyncJob, VIR_QEMU_PROCESS_STOP_MIGRATED);
+ }
+ return ret;
+}
+
+
/* qemuSaveImageGetCompressionProgram:
* @imageFormat: String representation from qemu.conf for the compression
* image format being used (dump, save, or snapshot).
@@ -758,6 +857,10 @@ qemuSaveImageOpen(virQEMUDriver *driver,
}
return ret;
}
+ if (data->header.multifd_channels > 0) {
+ if (virQEMUSaveFdAddChannels(saveFd, data->header.multifd_channels) < 0)
+ return -1;
+ }
/* Create a domain from this XML */
if (!(def = virDomainDefParseString(data->xml, driver->xmlopt, qemuCaps,
@@ -788,6 +891,7 @@ qemuSaveImageStartVM(virConnectPtr conn,
bool started = false;
virObjectEvent *event;
VIR_AUTOCLOSE intermediatefd = -1;
+ g_autofree char *migrate_from = NULL;
g_autoptr(virCommand) cmd = NULL;
g_autofree char *errbuf = NULL;
g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
@@ -834,8 +938,14 @@ qemuSaveImageStartVM(virConnectPtr conn,
if (cookie && !cookie->slirpHelper)
priv->disableSlirp = true;
+ if (fd) {
+ migrate_from = g_strdup("stdio");
+ } else {
+ migrate_from = g_strdup_printf("unix://%s", path);
+ }
+
if (qemuProcessStart(conn, driver, vm, cookie ? cookie->cpu : NULL,
- asyncJob, "stdio", *fd, path, wait_incoming,
+ asyncJob, migrate_from, fd ? *fd : -1, path, wait_incoming,
NULL,
VIR_NETDEV_VPORT_PROFILE_OP_RESTORE,
start_flags) == 0)
@@ -859,7 +969,7 @@ qemuSaveImageStartVM(virConnectPtr conn,
VIR_DEBUG("Decompression binary stderr: %s", NULLSTR(errbuf));
virErrorRestore(&orig_err);
}
- if (VIR_CLOSE(*fd) < 0) {
+ if (fd && VIR_CLOSE(*fd) < 0) {
virReportSystemError(errno, _("cannot close file: %s"), path);
rc = -1;
}
diff --git a/src/qemu/qemu_saveimage.h b/src/qemu/qemu_saveimage.h
index 91ea8dcbf2..158bcaff50 100644
--- a/src/qemu/qemu_saveimage.h
+++ b/src/qemu/qemu_saveimage.h
@@ -99,7 +99,7 @@ qemuSaveImageStartVM(virConnectPtr conn,
bool reset_nvram,
bool wait_incoming,
virDomainAsyncJob asyncJob)
- ATTRIBUTE_NONNULL(4) ATTRIBUTE_NONNULL(5) ATTRIBUTE_NONNULL(6);
+ ATTRIBUTE_NONNULL(5) ATTRIBUTE_NONNULL(6);
int
qemuSaveImageOpen(virQEMUDriver *driver,
@@ -117,6 +117,12 @@ qemuSaveImageGetCompressionProgram(const char *imageFormat,
bool use_raw_on_fail)
ATTRIBUTE_NONNULL(2);
+int qemuSaveImageLoadMultiFd(virConnectPtr conn, virDomainObj *vm,
+ virQEMUSaveData *data, bool reset_nvram,
+ virQEMUSaveFd *saveFd, virDomainAsyncJob asyncJob)
+ ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2) ATTRIBUTE_NONNULL(3)
+ ATTRIBUTE_NONNULL(5) G_GNUC_WARN_UNUSED_RESULT;
+
int
qemuSaveImageCreate(virQEMUDriver *driver,
virDomainObj *vm,
--
2.26.2