For RDMA migration we update the memory locking limit, but we never set
it back once migration finishes (on the destination host) or aborts (on
the source host).
Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
---
src/qemu/qemu_domain.c | 12 ++++++++++++
src/qemu/qemu_domain.h | 3 +++
src/qemu/qemu_migration.c | 11 ++++++++---
3 files changed, 23 insertions(+), 3 deletions(-)
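
Reviewer note, not part of the commit: the sketch below shows the
save/restore pattern this patch relies on, reduced to plain getrlimit()/
setrlimit() on the calling process. The helper names raise_memlock() and
restore_memlock() are made up for illustration only; the real code goes
through qemuDomainSetMaxMemLock(), which operates on the QEMU process and
is what the hunks below pass &priv->preMigrationMemlock to.

    #include <sys/resource.h>

    /* Raise RLIMIT_MEMLOCK to 'bytes', remembering the previous soft limit
     * in '*saved' unless a value is already stored there -- the same role
     * priv->preMigrationMemlock plays in this patch. */
    static int
    raise_memlock(unsigned long long bytes, unsigned long long *saved)
    {
        struct rlimit rlim;

        if (getrlimit(RLIMIT_MEMLOCK, &rlim) < 0)
            return -1;

        if (saved && *saved == 0)
            *saved = rlim.rlim_cur;

        rlim.rlim_cur = bytes;
        rlim.rlim_max = bytes;
        return setrlimit(RLIMIT_MEMLOCK, &rlim);
    }

    /* Put the remembered limit back and clear the stored value, which is
     * what qemuDomainSetMaxMemLock(vm, 0, &priv->preMigrationMemlock) does
     * once the migration job finishes or aborts (simplified: the saved soft
     * value is reused for the hard limit). */
    static int
    restore_memlock(unsigned long long *saved)
    {
        struct rlimit rlim = { *saved, *saved };
        int rc = setrlimit(RLIMIT_MEMLOCK, &rlim);

        *saved = 0;
        return rc;
    }

The fix is essentially to make sure the restore step runs on every exit
path of the migration job, instead of leaving the raised limit in place
for the rest of the domain's lifetime.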
diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
index e363993739..60ed358871 100644
--- a/src/qemu/qemu_domain.c
+++ b/src/qemu/qemu_domain.c
@@ -2413,6 +2413,11 @@ qemuDomainObjPrivateXMLFormat(virBuffer *buf,
                           priv->originalMemlock);
     }
 
+    if (priv->preMigrationMemlock > 0) {
+        virBufferAsprintf(buf,
+                          "<preMigrationMemlock>%llu</preMigrationMemlock>\n",
+                          priv->preMigrationMemlock);
+    }
+
     return 0;
 }
@@ -3139,6 +3144,13 @@ qemuDomainObjPrivateXMLParse(xmlXPathContextPtr ctxt,
         return -1;
     }
 
+    if (virXPathULongLong("string(./preMigrationMemlock)", ctxt,
+                          &priv->preMigrationMemlock) == -2) {
+        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+                       _("failed to parse pre-migration memlock limit"));
+        return -1;
+    }
+
     return 0;
 }
diff --git a/src/qemu/qemu_domain.h b/src/qemu/qemu_domain.h
index 6d35f61dfd..499ad03f91 100644
--- a/src/qemu/qemu_domain.h
+++ b/src/qemu/qemu_domain.h
@@ -140,6 +140,9 @@ struct _qemuDomainObjPrivate {
     int nbdPort; /* Port used for migration with NBD */
     unsigned short migrationPort;
     int preMigrationState;
+    unsigned long long preMigrationMemlock; /* Original RLIMIT_MEMLOCK in case
+                                               it was changed for the current
+                                               migration job. */
 
     virChrdevs *devs;
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index fe63f45629..ffae5576d2 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -3181,7 +3181,8 @@ qemuMigrationDstPrepareActive(virQEMUDriver *driver,
     if (STREQ_NULLABLE(protocol, "rdma") &&
         vm->def->mem.hard_limit > 0 &&
-        qemuDomainSetMaxMemLock(vm, vm->def->mem.hard_limit << 10, NULL) < 0) {
+        qemuDomainSetMaxMemLock(vm, vm->def->mem.hard_limit << 10,
+                                &priv->preMigrationMemlock) < 0) {
         goto error;
     }
@@ -3945,6 +3946,7 @@ qemuMigrationSrcComplete(virQEMUDriver *driver,
                                               VIR_DOMAIN_EVENT_STOPPED_MIGRATED);
     virObjectEventStateQueue(driver->domainEventState, event);
     qemuDomainEventEmitJobCompleted(driver, vm);
+    priv->preMigrationMemlock = 0;
 }
@@ -4035,6 +4037,7 @@ qemuMigrationSrcConfirmPhase(virQEMUDriver *driver,
         qemuMigrationParamsReset(driver, vm, VIR_ASYNC_JOB_MIGRATION_OUT,
                                  jobPriv->migParams, priv->job.apiFlags);
+        qemuDomainSetMaxMemLock(vm, 0, &priv->preMigrationMemlock);
     }
 
     qemuDomainSaveStatus(vm);
@@ -4615,7 +4618,8 @@ qemuMigrationSrcStart(virDomainObj *vm,
     case MIGRATION_DEST_HOST:
         if (STREQ(spec->dest.host.protocol, "rdma") &&
             vm->def->mem.hard_limit > 0 &&
-            qemuDomainSetMaxMemLock(vm, vm->def->mem.hard_limit << 10, NULL) < 0) {
+            qemuDomainSetMaxMemLock(vm, vm->def->mem.hard_limit << 10,
+                                    &priv->preMigrationMemlock) < 0) {
             return -1;
         }
         return qemuMonitorMigrateToHost(priv->mon, migrateFlags,
return qemuMonitorMigrateToHost(priv->mon, migrateFlags,
@@ -6155,6 +6159,7 @@ qemuMigrationSrcPerformPhase(virQEMUDriver *driver,
         qemuMigrationSrcRestoreDomainState(driver, vm);
         qemuMigrationParamsReset(driver, vm, VIR_ASYNC_JOB_MIGRATION_OUT,
                                  jobPriv->migParams, priv->job.apiFlags);
+        qemuDomainSetMaxMemLock(vm, 0, &priv->preMigrationMemlock);
         qemuMigrationJobFinish(vm);
     } else {
         if (ret < 0)
@@ -6410,7 +6415,7 @@ qemuMigrationDstComplete(virQEMUDriver *driver,
                              job->apiFlags);
 
     virPortAllocatorRelease(priv->migrationPort);
-    priv->migrationPort = 0;
+    qemuDomainSetMaxMemLock(vm, 0, &priv->preMigrationMemlock);
 }
--
2.35.1