Implement a function similar to qemuMigrationSrcToFile
that migrates to multiple files using QEMU multifd, and use it
for VIR_DOMAIN_SAVE_PARALLEL saves.
Signed-off-by: Claudio Fontana <cfontana@suse.de>
---
src/qemu/qemu_migration.c | 131 +++++++++++++++++++++++++-------------
src/qemu/qemu_migration.h | 7 ++
src/qemu/qemu_saveimage.c | 8 ++-
3 files changed, 98 insertions(+), 48 deletions(-)
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index f130ccbe26..fa6cddcc59 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -5875,13 +5875,14 @@ qemuMigrationDstFinish(virQEMUDriver *driver,
return dom;
}
-
/* Helper function called while vm is active. */
-int
-qemuMigrationSrcToFile(virQEMUDriver *driver, virDomainObj *vm,
- int fd,
- virCommand *compressor,
- virDomainAsyncJob asyncJob)
+static int
+qemuMigrationSrcToFileAux(virQEMUDriver *driver, virDomainObj *vm,
+ int fd,
+ virCommand *compressor,
+ virDomainAsyncJob asyncJob,
+ const char *sun_path,
+ int nchannels)
{
qemuDomainObjPrivate *priv = vm->privateData;
bool bwParam = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_PARAM_BANDWIDTH);
@@ -5892,24 +5893,26 @@ qemuMigrationSrcToFile(virQEMUDriver *driver, virDomainObj *vm,
char *errbuf = NULL;
virErrorPtr orig_err = NULL;
g_autoptr(qemuMigrationParams) migParams = NULL;
+ bool needParams = (bwParam || sun_path);
+ if (sun_path && !virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_MULTIFD)) {
+ virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
+ _("QEMU does not seem to support multifd migration, required for parallel migration to files"));
+ return -1;
+ }
if (qemuMigrationSetDBusVMState(driver, vm) < 0)
return -1;
/* Increase migration bandwidth to unlimited since target is a file.
* Failure to change migration speed is not fatal. */
- if (bwParam) {
- if (!(migParams = qemuMigrationParamsNew()))
- return -1;
+ if (needParams && !((migParams = qemuMigrationParamsNew())))
+ return -1;
+ if (bwParam) {
if (qemuMigrationParamsSetULL(migParams,
QEMU_MIGRATION_PARAM_MAX_BANDWIDTH,
QEMU_DOMAIN_MIG_BANDWIDTH_MAX * 1024 * 1024) < 0)
return -1;
-
- if (qemuMigrationParamsApply(driver, vm, asyncJob, migParams) < 0)
- return -1;
-
priv->migMaxBandwidth = QEMU_DOMAIN_MIG_BANDWIDTH_MAX;
} else {
if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) == 0) {
@@ -5920,6 +5923,17 @@ qemuMigrationSrcToFile(virQEMUDriver *driver, virDomainObj *vm,
}
}
+ if (sun_path) {
+ qemuMigrationParamsSetCap(migParams, QEMU_MIGRATION_CAP_MULTIFD);
+ if (qemuMigrationParamsSetInt(migParams,
+ QEMU_MIGRATION_PARAM_MULTIFD_CHANNELS,
+ nchannels) < 0)
+ return -1;
+ }
+
+ if (needParams && qemuMigrationParamsApply(driver, vm, asyncJob, migParams) < 0)
+ return -1;
+
if (!virDomainObjIsActive(vm)) {
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
_("guest unexpectedly quit"));
@@ -5927,45 +5941,53 @@ qemuMigrationSrcToFile(virQEMUDriver *driver, virDomainObj *vm,
return -1;
}
- if (compressor && virPipe(pipeFD) < 0)
+ if (!sun_path && compressor && virPipe(pipeFD) < 0)
return -1;
- /* All right! We can use fd migration, which means that qemu
- * doesn't have to open() the file, so while we still have to
- * grant SELinux access, we can do it on fd and avoid cleanup
- * later, as well as skip futzing with cgroup. */
- if (qemuSecuritySetImageFDLabel(driver->securityManager, vm->def,
- compressor ? pipeFD[1] : fd) < 0)
- goto cleanup;
-
if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
goto cleanup;
- if (!compressor) {
- rc = qemuMonitorMigrateToFd(priv->mon,
- QEMU_MONITOR_MIGRATE_BACKGROUND,
- fd);
+ if (sun_path) {
+ rc = qemuMonitorMigrateToSocket(priv->mon,
+ QEMU_MONITOR_MIGRATE_BACKGROUND,
+ sun_path);
} else {
- virCommandSetInputFD(compressor, pipeFD[0]);
- virCommandSetOutputFD(compressor, &fd);
- virCommandSetErrorBuffer(compressor, &errbuf);
- virCommandDoAsyncIO(compressor);
- if (virSetCloseExec(pipeFD[1]) < 0) {
- virReportSystemError(errno, "%s",
- _("Unable to set cloexec flag"));
- qemuDomainObjExitMonitor(vm);
- goto cleanup;
- }
- if (virCommandRunAsync(compressor, NULL) < 0) {
- qemuDomainObjExitMonitor(vm);
+ /*
+ * All right! We can use fd migration, which means that qemu
+ * doesn't have to open() the file, so while we still have to
+ * grant SELinux access, we can do it on fd and avoid cleanup
+ * later, as well as skip futzing with cgroup.
+ */
+ if (qemuSecuritySetImageFDLabel(driver->securityManager, vm->def,
+ compressor ? pipeFD[1] : fd) < 0)
goto cleanup;
+
+ if (!compressor) {
+ rc = qemuMonitorMigrateToFd(priv->mon,
+ QEMU_MONITOR_MIGRATE_BACKGROUND,
+ fd);
+ } else {
+ virCommandSetInputFD(compressor, pipeFD[0]);
+ virCommandSetOutputFD(compressor, &fd);
+ virCommandSetErrorBuffer(compressor, &errbuf);
+ virCommandDoAsyncIO(compressor);
+ if (virSetCloseExec(pipeFD[1]) < 0) {
+ virReportSystemError(errno, "%s",
+ _("Unable to set cloexec flag"));
+ qemuDomainObjExitMonitor(vm);
+ goto cleanup;
+ }
+ if (virCommandRunAsync(compressor, NULL) < 0) {
+ qemuDomainObjExitMonitor(vm);
+ goto cleanup;
+ }
+ rc = qemuMonitorMigrateToFd(priv->mon,
+ QEMU_MONITOR_MIGRATE_BACKGROUND,
+ pipeFD[1]);
+ if (VIR_CLOSE(pipeFD[0]) < 0 ||
+ VIR_CLOSE(pipeFD[1]) < 0)
+ VIR_WARN("failed to close intermediate pipe");
}
- rc = qemuMonitorMigrateToFd(priv->mon,
- QEMU_MONITOR_MIGRATE_BACKGROUND,
- pipeFD[1]);
- if (VIR_CLOSE(pipeFD[0]) < 0 ||
- VIR_CLOSE(pipeFD[1]) < 0)
- VIR_WARN("failed to close intermediate pipe");
}
qemuDomainObjExitMonitor(vm);
if (rc < 0)
@@ -5986,7 +6008,7 @@ qemuMigrationSrcToFile(virQEMUDriver *driver, virDomainObj *vm,
goto cleanup;
}
- if (compressor && virCommandWait(compressor, NULL) < 0)
+ if (!sun_path && compressor && virCommandWait(compressor, NULL) < 0)
goto cleanup;
qemuDomainEventEmitJobCompleted(driver, vm);
@@ -6025,6 +6047,25 @@ qemuMigrationSrcToFile(virQEMUDriver *driver, virDomainObj *vm,
return ret;
}
+int
+qemuMigrationSrcToFile(virQEMUDriver *driver, virDomainObj *vm,
+ int fd,
+ virCommand *compressor,
+ virDomainAsyncJob asyncJob)
+{
+ return qemuMigrationSrcToFileAux(driver, vm, fd, compressor,
+ asyncJob, NULL, -1);
+}
+
+int
+qemuMigrationSrcToFilesMultiFd(virQEMUDriver *driver, virDomainObj *vm,
+ virDomainAsyncJob asyncJob,
+ const char *sun_path,
+ int nchannels)
+{
+ return qemuMigrationSrcToFileAux(driver, vm, -1, NULL,
+ asyncJob, sun_path, nchannels);
+}
int
qemuMigrationSrcCancel(virQEMUDriver *driver,
diff --git a/src/qemu/qemu_migration.h b/src/qemu/qemu_migration.h
index a8afa66119..ddc8e65489 100644
--- a/src/qemu/qemu_migration.h
+++ b/src/qemu/qemu_migration.h
@@ -213,6 +213,13 @@ qemuMigrationSrcToFile(virQEMUDriver *driver,
virDomainAsyncJob asyncJob)
ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2) G_GNUC_WARN_UNUSED_RESULT;
+int
+qemuMigrationSrcToFilesMultiFd(virQEMUDriver *driver, virDomainObj *vm,
+ virDomainAsyncJob asyncJob,
+ const char *sun_path,
+ int nchannels)
+ ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2) G_GNUC_WARN_UNUSED_RESULT;
+
int
qemuMigrationSrcCancel(virQEMUDriver *driver,
virDomainObj *vm);
diff --git a/src/qemu/qemu_saveimage.c b/src/qemu/qemu_saveimage.c
index 7d8527596a..8abb919459 100644
--- a/src/qemu/qemu_saveimage.c
+++ b/src/qemu/qemu_saveimage.c
@@ -578,8 +578,11 @@ qemuSaveImageCreate(virQEMUDriver *driver,
oflags |= O_DIRECT;
}
- if (virQEMUSaveFdInit(&saveFd, path, oflags, cfg, false) < 0)
+ if (virQEMUSaveFdInit(&saveFd, path, oflags, cfg, flags & VIR_DOMAIN_SAVE_PARALLEL) < 0)
goto cleanup;
+ if (nconn > 0) {
+ data->header.multifd_channels = nconn;
+ }
if (qemuSecuritySetImageFDLabel(driver->securityManager, vm->def, saveFd.fd) < 0)
goto cleanup;
if (virQEMUSaveDataWrite(data, saveFd.fd, saveFd.path) < 0)
@@ -610,8 +613,7 @@ qemuSaveImageCreate(virQEMUDriver *driver,
goto cleanup;
if (chown(sun_path, cfg->user, cfg->group) < 0)
goto cleanup;
- /* still using single fd migration for now */
- if (qemuMigrationSrcToFile(driver, vm, saveFd.fd, compressor, asyncJob) < 0)
+ if (qemuMigrationSrcToFilesMultiFd(driver, vm, asyncJob, sun_path, nconn) < 0)
goto cleanup;
} else {
/* Perform non-parallel migration to file */
--
2.26.2