use multifd to restore parallel saves

Restore images saved with VIR_DOMAIN_SAVE_PARALLEL by starting QEMU
with a multifd incoming migration on a UNIX socket and running
libvirt_multifd_helper to feed the main save image and the per-channel
files into it.

To support this, export qemuMigrationDstWaitForCompletion(), add
qemuSaveImageLoadMultiFd(), and allow qemuSaveImageStartVM() to be
called with a NULL fd, in which case QEMU migrates from a "unix://"
URI built from @path instead of from "stdio".

Signed-off-by: Claudio Fontana <cfontana@suse.de>
---
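Notes (dropped by git-am, for reviewers only):

The parallel restore path is reached through the VIR_DOMAIN_SAVE_PARALLEL
flag handled in qemuDomainRestoreInternal() below. A rough caller-side
sketch of how this is meant to be driven from the public API follows.
The "parallel.channels" typed-parameter key used here is an assumption
for illustration; the real key is defined earlier in this series.

    #include <libvirt/libvirt.h>

    static int
    restore_parallel(virConnectPtr conn, const char *path, int nchannels)
    {
        virTypedParameterPtr params = NULL;
        int nparams = 0;
        int maxparams = 0;
        int ret = -1;

        /* main save image; the multifd channel files sit next to it */
        if (virTypedParamsAddString(&params, &nparams, &maxparams,
                                    VIR_DOMAIN_SAVE_PARAM_FILE, path) < 0)
            goto cleanup;

        /* channel count; parameter name assumed for this sketch */
        if (virTypedParamsAddInt(&params, &nparams, &maxparams,
                                 "parallel.channels", nchannels) < 0)
            goto cleanup;

        ret = virDomainRestoreParams(conn, params, nparams,
                                     VIR_DOMAIN_SAVE_PARALLEL);

     cleanup:
        virTypedParamsFree(params, nparams);
        return ret;
    }
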
src/qemu/qemu_driver.c | 12 +++--
src/qemu/qemu_migration.c | 2 +-
src/qemu/qemu_migration.h | 6 +++
src/qemu/qemu_saveimage.c | 111 +++++++++++++++++++++++++++++++++++++-
src/qemu/qemu_saveimage.h | 9 +++-
5 files changed, 133 insertions(+), 7 deletions(-)
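
For reference, qemuSaveImageLoadMultiFd() spawns the I/O helper as

    libvirt_multifd_helper $socket $nchannels $mainfd

e.g. "libvirt_multifd_helper /var/lib/libvirt/qemu/save/restore-multifd.sock 2 30"
(values illustrative): the socket is always $saveDir/restore-multifd.sock,
and the main save file descriptor is inherited via virCommandPassFD().
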
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index d46db751ad..993e9d2c2a 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -5815,7 +5815,7 @@ static int
qemuDomainRestoreInternal(virConnectPtr conn,
const char *path,
const char *dxml,
- int nchannels G_GNUC_UNUSED,
+ int nchannels,
unsigned int flags,
int (*ensureACL)(virConnectPtr, virDomainDef *))
{
@@ -5907,8 +5907,14 @@ qemuDomainRestoreInternal(virConnectPtr conn,
flags) < 0)
goto cleanup;
- ret = qemuSaveImageStartVM(conn, driver, vm, &saveFd.fd, data, path,
- false, reset_nvram, true, VIR_ASYNC_JOB_START);
+ if (flags & VIR_DOMAIN_SAVE_PARALLEL) {
+ ret = qemuSaveImageLoadMultiFd(conn, vm, oflags, data, reset_nvram,
+ &saveFd, nchannels, VIR_ASYNC_JOB_START);
+
+ } else {
+ ret = qemuSaveImageStartVM(conn, driver, vm, &saveFd.fd, data, path,
+ false, reset_nvram, true, VIR_ASYNC_JOB_START);
+ }
qemuProcessEndJob(vm);
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index 542428ab8e..7bab913fe5 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -1933,7 +1933,7 @@ qemuMigrationSrcWaitForCompletion(virQEMUDriver *driver,
}
-static int
+int
qemuMigrationDstWaitForCompletion(virQEMUDriver *driver,
virDomainObj *vm,
virDomainAsyncJob asyncJob,
diff --git a/src/qemu/qemu_migration.h b/src/qemu/qemu_migration.h
index c3c48c19c0..38f4877cf0 100644
--- a/src/qemu/qemu_migration.h
+++ b/src/qemu/qemu_migration.h
@@ -191,6 +191,12 @@ qemuMigrationDstFinish(virQEMUDriver *driver,
int retcode,
bool v3proto);
+int
+qemuMigrationDstWaitForCompletion(virQEMUDriver *driver,
+ virDomainObj *vm,
+ virDomainAsyncJob asyncJob,
+ bool postcopy);
+
int
qemuMigrationSrcConfirm(virQEMUDriver *driver,
virDomainObj *vm,
diff --git a/src/qemu/qemu_saveimage.c b/src/qemu/qemu_saveimage.c
index d58e070bf7..2bc81035ae 100644
--- a/src/qemu/qemu_saveimage.c
+++ b/src/qemu/qemu_saveimage.c
@@ -551,6 +551,106 @@ qemuSaveImageCreate(virQEMUDriver *driver,
}
+int qemuSaveImageLoadMultiFd(virConnectPtr conn, virDomainObj *vm, int oflags,
+ virQEMUSaveData *data, bool reset_nvram,
+ virQEMUSaveFd *saveFd, int nchannels,
+ virDomainAsyncJob asyncJob)
+{
+ virQEMUDriver *driver = conn->privateData;
+ qemuDomainObjPrivate *priv = vm->privateData;
+ virQEMUSaveFd *multiFd = NULL;
+ g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
+ g_autoptr(virCommand) cmd = NULL;
+ g_autofree char *helper_path = NULL;
+ g_autofree char *sun_path = g_strdup_printf("%s/restore-multifd.sock", cfg->saveDir);
+ int ret = -1;
+
+ if (!(helper_path = virFileFindResource("libvirt_multifd_helper",
+ abs_top_builddir "/src",
+ LIBEXECDIR)))
+ goto cleanup;
+ cmd = virCommandNewArgList(helper_path, sun_path, NULL);
+ virCommandAddArgFormat(cmd, "%d", nchannels);
+ virCommandAddArgFormat(cmd, "%d", saveFd->fd);
+ virCommandPassFD(cmd, saveFd->fd, 0);
+
+ /* Perform parallel multifd migration from files (main fd + channels) */
+ if (!(multiFd = qemuSaveImageCreateMultiFd(driver, vm, cmd, saveFd->path, oflags, cfg, nchannels)))
+ goto cleanup;
+ if (qemuSaveImageStartVM(conn, driver, vm, NULL, data, sun_path,
+ false, reset_nvram, false, asyncJob) < 0)
+ goto cleanup;
+ if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_MULTIFD)) {
+ virReportError(VIR_ERR_OPERATION_FAILED, "%s",
+ _("QEMU multifd not supported"));
+ goto cleanup;
+ } else {
+ g_autoptr(qemuMigrationParams) migParams = qemuMigrationParamsNew();
+ bool bwParam = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_PARAM_BANDWIDTH);
+
+ if (bwParam) {
+ if (qemuMigrationParamsSetULL(migParams,
+ QEMU_MIGRATION_PARAM_MAX_BANDWIDTH,
+ QEMU_DOMAIN_MIG_BANDWIDTH_MAX * 1024 * 1024) < 0)
+ goto cleanup;
+ priv->migMaxBandwidth = QEMU_DOMAIN_MIG_BANDWIDTH_MAX;
+ } else {
+ if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) == 0) {
+ qemuMonitorSetMigrationSpeed(priv->mon,
+ QEMU_DOMAIN_MIG_BANDWIDTH_MAX);
+ priv->migMaxBandwidth = QEMU_DOMAIN_MIG_BANDWIDTH_MAX;
+ qemuDomainObjExitMonitor(vm);
+ }
+ }
+ qemuMigrationParamsSetCap(migParams, QEMU_MIGRATION_CAP_MULTIFD);
+ if (qemuMigrationParamsSetInt(migParams,
+ QEMU_MIGRATION_PARAM_MULTIFD_CHANNELS,
+ nchannels) < 0)
+ goto cleanup;
+ if (qemuMigrationParamsApply(driver, vm, asyncJob, migParams) < 0)
+ goto cleanup;
+
+ if (!virDomainObjIsActive(vm)) {
+ virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+ _("guest unexpectedly quit"));
+ goto cleanup;
+ }
+ /* multifd helper can now connect, then wait for migration to complete */
+ if (virCommandRunAsync(cmd, NULL) < 0)
+ goto cleanup;
+
+ if (qemuMigrationDstWaitForCompletion(driver, vm, asyncJob, false) < 0)
+ goto cleanup;
+
+ if (qemuSaveImageCloseMultiFd(multiFd, nchannels, vm) < 0)
+ goto cleanup;
+
+ if (qemuProcessRefreshState(driver, vm, asyncJob) < 0)
+ goto cleanup;
+
+ /* run 'cont' on the destination */
+ if (qemuProcessStartCPUs(driver, vm,
+ VIR_DOMAIN_RUNNING_RESTORED,
+ asyncJob) < 0) {
+ if (virGetLastErrorCode() == VIR_ERR_OK)
+ virReportError(VIR_ERR_OPERATION_FAILED,
+ "%s", _("failed to resume domain"));
+ goto cleanup;
+ }
+ if (virDomainObjSave(vm, driver->xmlopt, cfg->stateDir) < 0) {
+ VIR_WARN("Failed to save status on vm %s", vm->def->name);
+ goto cleanup;
+ }
+ }
+ qemuDomainEventEmitJobCompleted(driver, vm);
+ ret = 0;
+
+ cleanup:
+ ret = qemuSaveImageFreeMultiFd(multiFd, vm, nchannels, ret);
+ return ret;
+}
+
+
/* qemuSaveImageGetCompressionProgram:
* @imageFormat: String representation from qemu.conf for the compression
* image format being used (dump, save, or snapshot).
@@ -757,6 +857,7 @@ qemuSaveImageStartVM(virConnectPtr conn,
bool started = false;
virObjectEvent *event;
VIR_AUTOCLOSE intermediatefd = -1;
+ g_autofree char *migrate_from = NULL;
g_autoptr(virCommand) cmd = NULL;
g_autofree char *errbuf = NULL;
g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
@@ -803,8 +904,14 @@ qemuSaveImageStartVM(virConnectPtr conn,
if (cookie && !cookie->slirpHelper)
priv->disableSlirp = true;
+ if (fd) {
+ migrate_from = g_strdup("stdio");
+ } else {
+ migrate_from = g_strdup_printf("unix://%s", path);
+ }
+
if (qemuProcessStart(conn, driver, vm, cookie ? cookie->cpu : NULL,
- asyncJob, "stdio", *fd, path, wait_incoming,
+ asyncJob, migrate_from, fd ? *fd : -1, path, wait_incoming,
NULL,
VIR_NETDEV_VPORT_PROFILE_OP_RESTORE,
start_flags) == 0)
@@ -828,7 +935,7 @@ qemuSaveImageStartVM(virConnectPtr conn,
VIR_DEBUG("Decompression binary stderr: %s", NULLSTR(errbuf));
virErrorRestore(&orig_err);
}
- if (VIR_CLOSE(*fd) < 0) {
+ if (fd && VIR_CLOSE(*fd) < 0) {
virReportSystemError(errno, _("cannot close file: %s"), path);
rc = -1;
}
diff --git a/src/qemu/qemu_saveimage.h b/src/qemu/qemu_saveimage.h
index 7be0892dde..ae7b3faa17 100644
--- a/src/qemu/qemu_saveimage.h
+++ b/src/qemu/qemu_saveimage.h
@@ -99,7 +99,7 @@ qemuSaveImageStartVM(virConnectPtr conn,
bool reset_nvram,
bool wait_incoming,
virDomainAsyncJob asyncJob)
- ATTRIBUTE_NONNULL(4) ATTRIBUTE_NONNULL(5) ATTRIBUTE_NONNULL(6);
+ ATTRIBUTE_NONNULL(5) ATTRIBUTE_NONNULL(6);
int
qemuSaveImageOpen(virQEMUDriver *driver,
@@ -117,6 +117,13 @@ qemuSaveImageGetCompressionProgram(const char *imageFormat,
bool use_raw_on_fail)
ATTRIBUTE_NONNULL(2);
+int qemuSaveImageLoadMultiFd(virConnectPtr conn, virDomainObj *vm, int oflags,
+ virQEMUSaveData *data, bool reset_nvram,
+ virQEMUSaveFd *saveFd, int nchannels,
+ virDomainAsyncJob asyncJob)
+ ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2) ATTRIBUTE_NONNULL(4)
+ ATTRIBUTE_NONNULL(6) G_GNUC_WARN_UNUSED_RESULT;
+
int
qemuSaveImageCreate(virQEMUDriver *driver,
virDomainObj *vm,
--
2.34.1