Use multifd to restore parallel saves.

When the VIR_DOMAIN_SAVE_PARALLEL flag is set, start QEMU listening on a
unix socket for the incoming migration instead of passing it an fd, spawn
the libvirt_multifd_helper to feed the main save file and the per-channel
files into that socket, enable the multifd capability with the channel
count recorded in the save image header, and wait for the incoming
migration to complete before resuming the guest.
Signed-off-by: Claudio Fontana <cfontana@suse.de>
---
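Not part of the commit: a minimal caller sketch of the new restore path.
It assumes this series' VIR_DOMAIN_SAVE_PARALLEL flag and an image
produced by the matching parallel save; the connection URI and image
path are placeholders.

    #include <stdio.h>
    #include <libvirt/libvirt.h>
    #include <libvirt/virterror.h>

    int main(void)
    {
        virConnectPtr conn = virConnectOpen("qemu:///system");
        if (!conn)
            return 1;
        /* The save image header records multifd_channels, so the
         * restore side knows how many channel files to expect. */
        if (virDomainRestoreFlags(conn, "/var/lib/libvirt/images/dom.sav",
                                  NULL, VIR_DOMAIN_SAVE_PARALLEL) < 0)
            fprintf(stderr, "restore failed: %s\n",
                    virGetLastErrorMessage());
        virConnectClose(conn);
        return 0;
    }
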
src/qemu/qemu_driver.c | 10 +++-
src/qemu/qemu_migration.c | 2 +-
src/qemu/qemu_migration.h | 6 ++
src/qemu/qemu_saveimage.c | 119 +++++++++++++++++++++++++++++++++++++-
src/qemu/qemu_saveimage.h | 8 ++-
5 files changed, 139 insertions(+), 6 deletions(-)
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 0e8dd7748c..72ab679336 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -5905,8 +5905,14 @@ qemuDomainRestoreInternal(virConnectPtr conn,
flags) < 0)
goto cleanup;
- ret = qemuSaveImageStartVM(conn, driver, vm, &saveFd.fd, data, path,
- false, reset_nvram, true, VIR_ASYNC_JOB_START);
+ if (flags & VIR_DOMAIN_SAVE_PARALLEL) {
+ ret = qemuSaveImageLoadMultiFd(conn, vm, oflags, data, reset_nvram,
+ &saveFd, VIR_ASYNC_JOB_START);
+
+ } else {
+ ret = qemuSaveImageStartVM(conn, driver, vm, &saveFd.fd, data, path,
+ false, reset_nvram, true, VIR_ASYNC_JOB_START);
+ }
qemuProcessEndJob(vm);
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index 93cd446b23..12b7e84f25 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -1933,7 +1933,7 @@ qemuMigrationSrcWaitForCompletion(virQEMUDriver *driver,
}
-static int
+int
qemuMigrationDstWaitForCompletion(virQEMUDriver *driver,
virDomainObj *vm,
virDomainAsyncJob asyncJob,
diff --git a/src/qemu/qemu_migration.h b/src/qemu/qemu_migration.h
index c3c48c19c0..38f4877cf0 100644
--- a/src/qemu/qemu_migration.h
+++ b/src/qemu/qemu_migration.h
@@ -191,6 +191,12 @@ qemuMigrationDstFinish(virQEMUDriver *driver,
int retcode,
bool v3proto);
+int
+qemuMigrationDstWaitForCompletion(virQEMUDriver *driver,
+ virDomainObj *vm,
+ virDomainAsyncJob asyncJob,
+ bool postcopy);
+
int
qemuMigrationSrcConfirm(virQEMUDriver *driver,
virDomainObj *vm,
diff --git a/src/qemu/qemu_saveimage.c b/src/qemu/qemu_saveimage.c
index 83dea78718..753e297226 100644
--- a/src/qemu/qemu_saveimage.c
+++ b/src/qemu/qemu_saveimage.c
@@ -579,6 +579,114 @@ qemuSaveImageCreate(virQEMUDriver *driver,
}
+int qemuSaveImageLoadMultiFd(virConnectPtr conn, virDomainObj *vm, int oflags,
+ virQEMUSaveData *data, bool reset_nvram,
+ virQEMUSaveFd *saveFd, virDomainAsyncJob asyncJob)
+{
+ virQEMUDriver *driver = conn->privateData;
+ qemuDomainObjPrivate *priv = vm->privateData;
+ virQEMUSaveFd *multiFd = NULL;
+ g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
+ g_autoptr(virCommand) cmd = NULL;
+ g_autofree char *helper_path = NULL;
+ g_autofree char *sun_path = g_strdup_printf("%s/restore-multifd.sock", cfg->saveDir);
+ bool qemu_started = false;
+ int ret = -1;
+ int nchannels = data->header.multifd_channels;
+
+ if (!(helper_path = virFileFindResource("libvirt_multifd_helper",
+ abs_top_builddir "/src",
+ LIBEXECDIR)))
+ goto cleanup;
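+ /* helper argv: socket path, channel count, fd of the main save
+ * file; virCommandPassFD() passes that fd down to the helper */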
+ cmd = virCommandNewArgList(helper_path, sun_path, NULL);
+ virCommandAddArgFormat(cmd, "%d", nchannels);
+ virCommandAddArgFormat(cmd, "%d", saveFd->fd);
+ virCommandPassFD(cmd, saveFd->fd, 0);
+
+ /* Perform parallel multifd migration from files (main fd + channels) */
+ if (!(multiFd = qemuSaveImageCreateMultiFd(driver, vm, cmd, saveFd->path, oflags, cfg, nchannels)))
+ goto cleanup;
+ if (qemuSaveImageStartVM(conn, driver, vm, NULL, data, sun_path,
+ false, reset_nvram, false, asyncJob) < 0)
+ goto cleanup;
+
+ qemu_started = true;
+
+ if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_MULTIFD)) {
+ virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
+ _("QEMU does not seem to support multifd migration, required
for parallel migration from files"));
+ goto cleanup;
+ } else {
+ g_autoptr(qemuMigrationParams) migParams = qemuMigrationParamsNew();
+ bool bwParam = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_PARAM_BANDWIDTH);
+
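+ /* restoring from local files: raise the migration bandwidth to
+ * the maximum so the transfer is not throttled */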
+ if (bwParam) {
+ if (qemuMigrationParamsSetULL(migParams,
+ QEMU_MIGRATION_PARAM_MAX_BANDWIDTH,
+ QEMU_DOMAIN_MIG_BANDWIDTH_MAX * 1024 * 1024) < 0)
+ goto cleanup;
+ priv->migMaxBandwidth = QEMU_DOMAIN_MIG_BANDWIDTH_MAX;
+ } else {
+ if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) == 0) {
+ qemuMonitorSetMigrationSpeed(priv->mon,
+ QEMU_DOMAIN_MIG_BANDWIDTH_MAX);
+ priv->migMaxBandwidth = QEMU_DOMAIN_MIG_BANDWIDTH_MAX;
+ qemuDomainObjExitMonitor(vm);
+ }
+ }
+ qemuMigrationParamsSetCap(migParams, QEMU_MIGRATION_CAP_MULTIFD);
+ if (qemuMigrationParamsSetInt(migParams,
+ QEMU_MIGRATION_PARAM_MULTIFD_CHANNELS,
+ nchannels) < 0)
+ goto cleanup;
+ if (qemuMigrationParamsApply(driver, vm, asyncJob, migParams) < 0)
+ goto cleanup;
+
+ if (!virDomainObjIsActive(vm)) {
+ virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+ _("guest unexpectedly quit"));
+ goto cleanup;
+ }
+ /* multifd helper can now connect, then wait for migration to complete */
+ if (virCommandRunAsync(cmd, NULL) < 0)
+ goto cleanup;
+
+ if (qemuMigrationDstWaitForCompletion(driver, vm, asyncJob, false) < 0)
+ goto cleanup;
+
+ if (qemuSaveImageCloseMultiFd(multiFd, nchannels, vm) < 0)
+ goto cleanup;
+
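+ /* re-read the runtime state from QEMU now that the incoming
+ * migration has finished */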
+ if (qemuProcessRefreshState(driver, vm, asyncJob) < 0)
+ goto cleanup;
+
+ /* run 'cont' on the destination */
+ if (qemuProcessStartCPUs(driver, vm,
+ VIR_DOMAIN_RUNNING_RESTORED,
+ asyncJob) < 0) {
+ if (virGetLastErrorCode() == VIR_ERR_OK)
+ virReportError(VIR_ERR_OPERATION_FAILED,
+ "%s", _("failed to resume domain"));
+ goto cleanup;
+ }
+ if (virDomainObjSave(vm, driver->xmlopt, cfg->stateDir) < 0) {
+ VIR_WARN("Failed to save status on vm %s", vm->def->name);
+ goto cleanup;
+ }
+ }
+ qemuDomainEventEmitJobCompleted(driver, vm);
+ ret = 0;
+
+ cleanup:
+ if (ret < 0 && qemu_started) {
+ qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
+ asyncJob, VIR_QEMU_PROCESS_STOP_MIGRATED);
+ }
+ ret = qemuSaveImageFreeMultiFd(multiFd, vm, nchannels, ret);
+ return ret;
+}
+
+
/* qemuSaveImageGetCompressionProgram:
* @imageFormat: String representation from qemu.conf for the compression
* image format being used (dump, save, or snapshot).
@@ -789,6 +897,7 @@ qemuSaveImageStartVM(virConnectPtr conn,
bool started = false;
virObjectEvent *event;
VIR_AUTOCLOSE intermediatefd = -1;
+ g_autofree char *migrate_from = NULL;
g_autoptr(virCommand) cmd = NULL;
g_autofree char *errbuf = NULL;
g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
@@ -835,8 +944,14 @@ qemuSaveImageStartVM(virConnectPtr conn,
if (cookie && !cookie->slirpHelper)
priv->disableSlirp = true;
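+ /* a NULL fd means the incoming migration is fed through a unix
+ * socket rather than stdio */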
+ if (fd) {
+ migrate_from = g_strdup("stdio");
+ } else {
+ migrate_from = g_strdup_printf("unix://%s", path);
+ }
+
if (qemuProcessStart(conn, driver, vm, cookie ? cookie->cpu : NULL,
- asyncJob, "stdio", *fd, path, wait_incoming,
+ asyncJob, migrate_from, fd ? *fd : -1, path, wait_incoming,
NULL,
VIR_NETDEV_VPORT_PROFILE_OP_RESTORE,
start_flags) == 0)
@@ -860,7 +975,7 @@ qemuSaveImageStartVM(virConnectPtr conn,
VIR_DEBUG("Decompression binary stderr: %s", NULLSTR(errbuf));
virErrorRestore(&orig_err);
}
- if (VIR_CLOSE(*fd) < 0) {
+ if (fd && VIR_CLOSE(*fd) < 0) {
virReportSystemError(errno, _("cannot close file: %s"), path);
rc = -1;
}
diff --git a/src/qemu/qemu_saveimage.h b/src/qemu/qemu_saveimage.h
index 412624b968..719e6506a5 100644
--- a/src/qemu/qemu_saveimage.h
+++ b/src/qemu/qemu_saveimage.h
@@ -101,7 +101,7 @@ qemuSaveImageStartVM(virConnectPtr conn,
bool reset_nvram,
bool wait_incoming,
virDomainAsyncJob asyncJob)
- ATTRIBUTE_NONNULL(4) ATTRIBUTE_NONNULL(5) ATTRIBUTE_NONNULL(6);
+ ATTRIBUTE_NONNULL(5) ATTRIBUTE_NONNULL(6);
int
qemuSaveImageOpen(virQEMUDriver *driver,
@@ -119,6 +119,12 @@ qemuSaveImageGetCompressionProgram(const char *imageFormat,
bool use_raw_on_fail)
ATTRIBUTE_NONNULL(2);
+int qemuSaveImageLoadMultiFd(virConnectPtr conn, virDomainObj *vm, int oflags,
+ virQEMUSaveData *data, bool reset_nvram,
+ virQEMUSaveFd *saveFd, virDomainAsyncJob asyncJob)
+ ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2) ATTRIBUTE_NONNULL(4)
+ ATTRIBUTE_NONNULL(6) G_GNUC_WARN_UNUSED_RESULT;
+
int
qemuSaveImageCreate(virQEMUDriver *driver,
virDomainObj *vm,
--
2.35.3