QEMU keeps guest CPUs running even in postcopy-paused migration state so
that processes that already have all memory pages they need migrated to
the destination can keep running. However, this behavior might bring
unexpected delays in interprocess communication as some processes will
be stopped until migration is recovered and their memory pages are
migrated. So let's make sure all guest CPUs are paused while postcopy
migration is paused.
---
Notes:
Version 2:
- new patch
- this patch does not currently work as QEMU cannot handle "stop"
QMP command while in postcopy-paused state... the monitor just
hangs (see
https://gitlab.com/qemu-project/qemu/-/issues/1052 )
- an ideal solution of the QEMU bug would be if QEMU itself paused
the CPUs for us and we just got notified about it via QMP events
- but Peter Xu thinks this behavior is actually worse than keeping
vCPUs running
- so let's take this patch as a base for discussing what we should
be doing with vCPUs in postcopy-paused migration state
src/qemu/qemu_domain.c | 1 +
src/qemu/qemu_domain.h | 1 +
src/qemu/qemu_driver.c | 30 +++++++++++++++++++++++++
src/qemu/qemu_migration.c | 47 +++++++++++++++++++++++++++++++++++++++
src/qemu/qemu_migration.h | 6 +++++
src/qemu/qemu_process.c | 32 ++++++++++++++++++++++++++
6 files changed, 117 insertions(+)
diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
index d04ec6cd0c..dcd6d5e1b5 100644
--- a/src/qemu/qemu_domain.c
+++ b/src/qemu/qemu_domain.c
@@ -11115,6 +11115,7 @@ qemuProcessEventFree(struct qemuProcessEvent *event)
break;
case QEMU_PROCESS_EVENT_PR_DISCONNECT:
case QEMU_PROCESS_EVENT_UNATTENDED_MIGRATION:
+ case QEMU_PROCESS_EVENT_MIGRATION_CPU_STATE:
case QEMU_PROCESS_EVENT_LAST:
break;
}
diff --git a/src/qemu/qemu_domain.h b/src/qemu/qemu_domain.h
index 153dfe3a23..f5cdb2235f 100644
--- a/src/qemu/qemu_domain.h
+++ b/src/qemu/qemu_domain.h
@@ -427,6 +427,7 @@ typedef enum {
QEMU_PROCESS_EVENT_GUEST_CRASHLOADED,
QEMU_PROCESS_EVENT_MEMORY_DEVICE_SIZE_CHANGE,
QEMU_PROCESS_EVENT_UNATTENDED_MIGRATION,
+ QEMU_PROCESS_EVENT_MIGRATION_CPU_STATE,
QEMU_PROCESS_EVENT_LAST
} qemuProcessEventType;
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 637106f1b3..d0498ef2aa 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -4255,6 +4255,33 @@ processMemoryDeviceSizeChange(virQEMUDriver *driver,
}
+static void
+processMigrationCPUState(virDomainObj *vm,
+ virDomainState state,
+ int reason)
+{
+ qemuDomainObjPrivate *priv = vm->privateData;
+ virQEMUDriver *driver = priv->driver;
+
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MIGRATION_SAFE) < 0)
+ return;
+
+ if (!virDomainObjIsActive(vm)) {
+ VIR_DEBUG("Domain '%s' is not running", vm->def->name);
+ goto endjob;
+ }
+
+ if (priv->job.asyncJob == VIR_ASYNC_JOB_MIGRATION_IN &&
+ virDomainObjIsPostcopy(vm, VIR_DOMAIN_JOB_OPERATION_MIGRATION_IN)) {
+ qemuMigrationUpdatePostcopyCPUState(vm, state, reason,
+ VIR_ASYNC_JOB_NONE);
+ }
+
+ endjob:
+ qemuDomainObjEndJob(vm);
+}
+
+
static void qemuProcessEventHandler(void *data, void *opaque)
{
struct qemuProcessEvent *processEvent = data;
@@ -4312,6 +4339,9 @@ static void qemuProcessEventHandler(void *data, void *opaque)
processEvent->action,
processEvent->status);
break;
+ case QEMU_PROCESS_EVENT_MIGRATION_CPU_STATE:
+ processMigrationCPUState(vm, processEvent->action, processEvent->status);
+ break;
case QEMU_PROCESS_EVENT_LAST:
break;
}
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index 0314fb1148..58d7009363 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -6831,6 +6831,53 @@ qemuMigrationProcessUnattended(virQEMUDriver *driver,
}
+void
+qemuMigrationUpdatePostcopyCPUState(virDomainObj *vm,
+ virDomainState state,
+ int reason,
+ int asyncJob)
+{
+ virQEMUDriver *driver = QEMU_DOMAIN_PRIVATE(vm)->driver;
+ int current;
+
+ if (state == VIR_DOMAIN_PAUSED) {
+ VIR_DEBUG("Post-copy migration of domain '%s' was paused, stopping guest CPUs",
+ vm->def->name);
+ } else {
+ VIR_DEBUG("Post-copy migration of domain '%s' was resumed, starting guest CPUs",
+ vm->def->name);
+ }
+
+ if (virDomainObjGetState(vm, &current) == state) {
+ int eventType = -1;
+ int eventDetail = -1;
+
+ if (current == reason) {
+ VIR_DEBUG("Guest CPUs are already in the right state");
+ return;
+ }
+
+ VIR_DEBUG("Fixing domain state reason");
+ if (state == VIR_DOMAIN_PAUSED) {
+ eventType = VIR_DOMAIN_EVENT_SUSPENDED;
+ eventDetail = qemuDomainPausedReasonToSuspendedEvent(reason);
+ } else {
+ eventType = VIR_DOMAIN_EVENT_RESUMED;
+ eventDetail = qemuDomainRunningReasonToResumeEvent(reason);
+ }
+ virDomainObjSetState(vm, state, reason);
+ qemuDomainSaveStatus(vm);
+ virObjectEventStateQueue(driver->domainEventState,
+ virDomainEventLifecycleNewFromObj(vm, eventType,
+ eventDetail));
+ } else if (state == VIR_DOMAIN_PAUSED) {
+ qemuProcessStopCPUs(driver, vm, reason, asyncJob);
+ } else {
+ qemuProcessStartCPUs(driver, vm, reason, asyncJob);
+ }
+}
+
+
/* Helper function called while vm is active. */
int
qemuMigrationSrcToFile(virQEMUDriver *driver, virDomainObj *vm,
diff --git a/src/qemu/qemu_migration.h b/src/qemu/qemu_migration.h
index fbc0549b34..a1e2d8d171 100644
--- a/src/qemu/qemu_migration.h
+++ b/src/qemu/qemu_migration.h
@@ -224,6 +224,12 @@ qemuMigrationProcessUnattended(virQEMUDriver *driver,
virDomainAsyncJob job,
qemuMonitorMigrationStatus status);
+void
+qemuMigrationUpdatePostcopyCPUState(virDomainObj *vm,
+ virDomainState state,
+ int reason,
+ int asyncJob);
+
bool
qemuMigrationSrcIsAllowed(virQEMUDriver *driver,
virDomainObj *vm,
diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
index ad529dabb4..7fff68c0db 100644
--- a/src/qemu/qemu_process.c
+++ b/src/qemu/qemu_process.c
@@ -1521,6 +1521,10 @@ qemuProcessHandleMigrationStatus(qemuMonitor *mon G_GNUC_UNUSED,
* Thus we need to handle the event here. */
qemuMigrationSrcPostcopyFailed(vm);
qemuDomainSaveStatus(vm);
+ } else if (priv->job.asyncJob == VIR_ASYNC_JOB_MIGRATION_IN) {
+ qemuProcessEventSubmit(vm, QEMU_PROCESS_EVENT_MIGRATION_CPU_STATE,
+ VIR_DOMAIN_PAUSED,
+ VIR_DOMAIN_PAUSED_POSTCOPY_FAILED, NULL);
}
break;
@@ -1547,6 +1551,12 @@ qemuProcessHandleMigrationStatus(qemuMonitor *mon G_GNUC_UNUSED,
event = virDomainEventLifecycleNewFromObj(vm, eventType, eventDetail);
qemuDomainSaveStatus(vm);
}
+
+ if (priv->job.asyncJob == VIR_ASYNC_JOB_MIGRATION_IN) {
+ qemuProcessEventSubmit(vm, QEMU_PROCESS_EVENT_MIGRATION_CPU_STATE,
+ VIR_DOMAIN_RUNNING,
+ VIR_DOMAIN_RUNNING_POSTCOPY, NULL);
+ }
break;
case QEMU_MONITOR_MIGRATION_STATUS_COMPLETED:
@@ -3703,10 +3713,32 @@ qemuProcessRecoverMigration(virQEMUDriver *driver,
if (migStatus == VIR_DOMAIN_JOB_STATUS_POSTCOPY) {
VIR_DEBUG("Post-copy migration of domain %s still running, it will be handled as unattended",
vm->def->name);
+
+ if (job->asyncJob == VIR_ASYNC_JOB_MIGRATION_IN &&
+ state == VIR_DOMAIN_PAUSED) {
+ qemuMigrationUpdatePostcopyCPUState(vm, VIR_DOMAIN_RUNNING,
+ VIR_DOMAIN_RUNNING_POSTCOPY,
+ VIR_ASYNC_JOB_NONE);
+ } else {
+ if (state == VIR_DOMAIN_RUNNING)
+ reason = VIR_DOMAIN_RUNNING_POSTCOPY;
+ else
+ reason = VIR_DOMAIN_PAUSED_POSTCOPY;
+
+ virDomainObjSetState(vm, state, reason);
+ }
+
qemuProcessRestoreMigrationJob(vm, job);
return 0;
}
+ if (job->asyncJob == VIR_ASYNC_JOB_MIGRATION_IN &&
+ migStatus == VIR_DOMAIN_JOB_STATUS_POSTCOPY_PAUSED) {
+ qemuMigrationUpdatePostcopyCPUState(vm, VIR_DOMAIN_PAUSED,
+ VIR_DOMAIN_PAUSED_POSTCOPY,
+ VIR_ASYNC_JOB_NONE);
+ }
+
if (migStatus != VIR_DOMAIN_JOB_STATUS_HYPERVISOR_COMPLETED) {
if (job->asyncJob == VIR_ASYNC_JOB_MIGRATION_OUT)
qemuMigrationSrcPostcopyFailed(vm);
--
2.35.1