Use multifd to restore parallel save images when VIR_DOMAIN_SAVE_PARALLEL
is enabled: start QEMU with a unix socket as the incoming migration
source, launch the libvirt_multifd_helper to feed the main save stream
and the per-channel files into it, and wait for the incoming migration
to complete before resuming the guest.

If a save image contains multifd channel information but the restore is
not requested as parallel, report an error instead.
Signed-off-by: Claudio Fontana <cfontana@suse.de>
---
src/qemu/qemu_driver.c | 16 ++++-
src/qemu/qemu_migration.c | 2 +-
src/qemu/qemu_migration.h | 6 ++
src/qemu/qemu_saveimage.c | 119 +++++++++++++++++++++++++++++++++++++-
src/qemu/qemu_saveimage.h | 8 ++-
5 files changed, 144 insertions(+), 7 deletions(-)
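
Note for reviewers: below is a minimal client-side sketch of how a restore
with VIR_DOMAIN_SAVE_PARALLEL would be driven, assuming the earlier patches
in this series expose the flag through virDomainRestoreFlags; the connection
URI and image path are illustrative only, not part of this patch:

    /* restore-parallel.c: sketch only */
    #include <stdio.h>
    #include <libvirt/libvirt.h>

    int main(void)
    {
        virConnectPtr conn = virConnectOpen("qemu:///system");
        if (!conn)
            return 1;

        /* The image is expected to come from a parallel save, i.e. its
         * header carries multifd_channels != 0; restoring such an image
         * without this flag is now rejected by qemuDomainRestoreInternal. */
        if (virDomainRestoreFlags(conn, "/var/lib/libvirt/qemu/save/vm.sav",
                                  NULL, VIR_DOMAIN_SAVE_PARALLEL) < 0)
            fprintf(stderr, "parallel restore failed\n");

        virConnectClose(conn);
        return 0;
    }
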
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 4dc106a621..d1dbf8f7ab 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -5869,7 +5869,7 @@ qemuDomainRestoreInternal(virConnectPtr conn,
         }
         oflags |= O_DIRECT;
     }
-    if (virQEMUSaveFdInit(&saveFd, path, 0, oflags, cfg, false) < 0)
+    if (virQEMUSaveFdInit(&saveFd, path, 0, oflags, cfg, flags & VIR_DOMAIN_SAVE_PARALLEL) < 0)
         return -1;
     if (qemuSaveImageOpen(driver, NULL, &def, &data, false, &saveFd) < 0)
         goto cleanup;
@@ -5925,8 +5925,18 @@ qemuDomainRestoreInternal(virConnectPtr conn,
     if (qemuProcessBeginJob(vm, VIR_DOMAIN_JOB_OPERATION_RESTORE,
                             flags) < 0)
         goto cleanup;
 
-    ret = qemuSaveImageStartVM(conn, driver, vm, &saveFd.fd, data, path,
-                               false, reset_nvram, true, VIR_ASYNC_JOB_START);
+    if (flags & VIR_DOMAIN_SAVE_PARALLEL) {
+        ret = qemuSaveImageLoadMultiFd(conn, vm, oflags, data, reset_nvram,
+                                       &saveFd, VIR_ASYNC_JOB_START);
+
+    } else if (data->header.multifd_channels != 0) {
+        virReportError(VIR_ERR_OPERATION_FAILED, "%s",
+                       _("save file contains multifd channel information, but restore was not requested with the 'parallel' flag"));
+        ret = -1;
+    } else {
+        ret = qemuSaveImageStartVM(conn, driver, vm, &saveFd.fd, data, path,
+                                   false, reset_nvram, true, VIR_ASYNC_JOB_START);
+    }
 
     qemuProcessEndJob(vm);
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index 6250b707b3..c4e1837419 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -1920,7 +1920,7 @@ qemuMigrationSrcWaitForCompletion(virQEMUDriver *driver,
 }
 
 
-static int
+int
 qemuMigrationDstWaitForCompletion(virQEMUDriver *driver,
                                   virDomainObj *vm,
                                   virDomainAsyncJob asyncJob,
diff --git a/src/qemu/qemu_migration.h b/src/qemu/qemu_migration.h
index c3c48c19c0..38f4877cf0 100644
--- a/src/qemu/qemu_migration.h
+++ b/src/qemu/qemu_migration.h
@@ -191,6 +191,12 @@ qemuMigrationDstFinish(virQEMUDriver *driver,
                        int retcode,
                        bool v3proto);
 
+int
+qemuMigrationDstWaitForCompletion(virQEMUDriver *driver,
+                                  virDomainObj *vm,
+                                  virDomainAsyncJob asyncJob,
+                                  bool postcopy);
+
 int
 qemuMigrationSrcConfirm(virQEMUDriver *driver,
                         virDomainObj *vm,
diff --git a/src/qemu/qemu_saveimage.c b/src/qemu/qemu_saveimage.c
index c652293a02..7becaa5c25 100644
--- a/src/qemu/qemu_saveimage.c
+++ b/src/qemu/qemu_saveimage.c
@@ -686,6 +686,114 @@ qemuSaveImageCreate(virQEMUDriver *driver,
 }
 
 
+int qemuSaveImageLoadMultiFd(virConnectPtr conn, virDomainObj *vm, int oflags,
+                             virQEMUSaveData *data, bool reset_nvram,
+                             virQEMUSaveFd *saveFd, virDomainAsyncJob asyncJob)
+{
+    virQEMUDriver *driver = conn->privateData;
+    qemuDomainObjPrivate *priv = vm->privateData;
+    virQEMUSaveFd *multiFd = NULL;
+    g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
+    g_autoptr(virCommand) cmd = NULL;
+    g_autofree char *helper_path = NULL;
+    g_autofree char *sun_path = g_strdup_printf("%s/restore-multifd.sock", cfg->saveDir);
+    bool qemu_started = false;
+    int ret = -1;
+    int nchannels = data->header.multifd_channels;
+
+    if (!(helper_path = virFileFindResource("libvirt_multifd_helper",
+                                            abs_top_builddir "/src",
+                                            LIBEXECDIR)))
+        goto cleanup;
+    cmd = virCommandNewArgList(helper_path, sun_path, NULL);
+    virCommandAddArgFormat(cmd, "%d", nchannels);
+    virCommandAddArgFormat(cmd, "%d", saveFd->fd);
+    virCommandPassFD(cmd, saveFd->fd, 0);
+
+    /* Perform parallel multifd migration from files (main fd + channels) */
+    if (!(multiFd = qemuSaveImageCreateMultiFd(driver, vm, cmd, saveFd->path, oflags, cfg, nchannels)))
+        goto cleanup;
+    if (qemuSaveImageStartVM(conn, driver, vm, NULL, data, sun_path,
+                             false, reset_nvram, false, asyncJob) < 0)
+        goto cleanup;
+
+    qemu_started = true;
+
+    if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_MULTIFD)) {
+        virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
+                       _("QEMU does not seem to support multifd migration, required for parallel migration from files"));
+        goto cleanup;
+    } else {
+        g_autoptr(qemuMigrationParams) migParams = qemuMigrationParamsNew();
+        bool bwParam = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_PARAM_BANDWIDTH);
+
+        if (bwParam) {
+            if (qemuMigrationParamsSetULL(migParams,
+                                          QEMU_MIGRATION_PARAM_MAX_BANDWIDTH,
+                                          QEMU_DOMAIN_MIG_BANDWIDTH_MAX * 1024 * 1024) < 0)
+                goto cleanup;
+            priv->migMaxBandwidth = QEMU_DOMAIN_MIG_BANDWIDTH_MAX;
+        } else {
+            if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) == 0) {
+                qemuMonitorSetMigrationSpeed(priv->mon,
+                                             QEMU_DOMAIN_MIG_BANDWIDTH_MAX);
+                priv->migMaxBandwidth = QEMU_DOMAIN_MIG_BANDWIDTH_MAX;
+                qemuDomainObjExitMonitor(vm);
+            }
+        }
+        qemuMigrationParamsSetCap(migParams, QEMU_MIGRATION_CAP_MULTIFD);
+        if (qemuMigrationParamsSetInt(migParams,
+                                      QEMU_MIGRATION_PARAM_MULTIFD_CHANNELS,
+                                      nchannels) < 0)
+            goto cleanup;
+        if (qemuMigrationParamsApply(driver, vm, asyncJob, migParams) < 0)
+            goto cleanup;
+
+        if (!virDomainObjIsActive(vm)) {
+            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+                           _("guest unexpectedly quit"));
+            goto cleanup;
+        }
+        /* multifd helper can now connect, then wait for migration to complete */
+        if (virCommandRunAsync(cmd, NULL) < 0)
+            goto cleanup;
+
+        if (qemuMigrationDstWaitForCompletion(driver, vm, asyncJob, false) < 0)
+            goto cleanup;
+
+        if (qemuSaveImageCloseMultiFd(multiFd, nchannels, vm) < 0)
+            goto cleanup;
+
+        if (qemuProcessRefreshState(driver, vm, asyncJob) < 0)
+            goto cleanup;
+
+        /* run 'cont' on the destination */
+        if (qemuProcessStartCPUs(driver, vm,
+                                 VIR_DOMAIN_RUNNING_RESTORED,
+                                 asyncJob) < 0) {
+            if (virGetLastErrorCode() == VIR_ERR_OK)
+                virReportError(VIR_ERR_OPERATION_FAILED,
+                               "%s", _("failed to resume domain"));
+            goto cleanup;
+        }
+        if (virDomainObjSave(vm, driver->xmlopt, cfg->stateDir) < 0) {
+            VIR_WARN("Failed to save status on vm %s", vm->def->name);
+            goto cleanup;
+        }
+    }
+    qemuDomainEventEmitJobCompleted(driver, vm);
+    ret = 0;
+
+ cleanup:
+    if (ret < 0 && qemu_started) {
+        qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
+                        asyncJob, VIR_QEMU_PROCESS_STOP_MIGRATED);
+    }
+    ret = qemuSaveImageFreeMultiFd(multiFd, vm, nchannels, ret);
+    return ret;
+}
+
+
 /* qemuSaveImageGetCompressionProgram:
  * @imageFormat: String representation from qemu.conf for the compression
  * image format being used (dump, save, or snapshot).
@@ -831,6 +939,7 @@ qemuSaveImageStartVM(virConnectPtr conn,
     bool started = false;
     virObjectEvent *event;
     VIR_AUTOCLOSE intermediatefd = -1;
+    g_autofree char *migrate_from = NULL;
     g_autoptr(virCommand) cmd = NULL;
     g_autofree char *errbuf = NULL;
     g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
@@ -877,8 +986,14 @@ qemuSaveImageStartVM(virConnectPtr conn,
     if (cookie && !cookie->slirpHelper)
         priv->disableSlirp = true;
 
+    if (fd) {
+        migrate_from = g_strdup("stdio");
+    } else {
+        migrate_from = g_strdup_printf("unix://%s", path);
+    }
+
     if (qemuProcessStart(conn, driver, vm, cookie ? cookie->cpu : NULL,
-                         asyncJob, "stdio", *fd, path, wait_incoming,
+                         asyncJob, migrate_from, fd ? *fd : -1, path, wait_incoming,
                          NULL,
                          VIR_NETDEV_VPORT_PROFILE_OP_RESTORE,
                          start_flags) == 0)
@@ -902,7 +1017,7 @@ qemuSaveImageStartVM(virConnectPtr conn,
         VIR_DEBUG("Decompression binary stderr: %s", NULLSTR(errbuf));
         virErrorRestore(&orig_err);
     }
-    if (VIR_CLOSE(*fd) < 0) {
+    if (fd && VIR_CLOSE(*fd) < 0) {
         virReportSystemError(errno, _("cannot close file: %s"), path);
         rc = -1;
     }
diff --git a/src/qemu/qemu_saveimage.h b/src/qemu/qemu_saveimage.h
index 952c5cd58a..99cc9a81a9 100644
--- a/src/qemu/qemu_saveimage.h
+++ b/src/qemu/qemu_saveimage.h
@@ -101,7 +101,7 @@ qemuSaveImageStartVM(virConnectPtr conn,
                      bool reset_nvram,
                      bool wait_incoming,
                      virDomainAsyncJob asyncJob)
-    ATTRIBUTE_NONNULL(4) ATTRIBUTE_NONNULL(5) ATTRIBUTE_NONNULL(6);
+    ATTRIBUTE_NONNULL(5) ATTRIBUTE_NONNULL(6);
 
 int
 qemuSaveImageOpen(virQEMUDriver *driver,
@@ -119,6 +119,12 @@ qemuSaveImageGetCompressionProgram(const char *imageFormat,
                                    bool use_raw_on_fail)
     ATTRIBUTE_NONNULL(2);
 
+int qemuSaveImageLoadMultiFd(virConnectPtr conn, virDomainObj *vm, int oflags,
+                             virQEMUSaveData *data, bool reset_nvram,
+                             virQEMUSaveFd *saveFd, virDomainAsyncJob asyncJob)
+    ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2) ATTRIBUTE_NONNULL(4)
+    ATTRIBUTE_NONNULL(6) G_GNUC_WARN_UNUSED_RESULT;
+
 int
 qemuSaveImageCreate(virQEMUDriver *driver,
                     virDomainObj *vm,
--
2.35.3