With a very old QEMU which doesn't support events, we need to explicitly
call qemuMigrationSetOffline at the end of migration to update our
internal state. On the other hand, if we talk to QEMU using QMP, we
should just wait for the STOP event and let the event handler update the
state and trigger a libvirt event.

Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
---
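Note for reviewers: the core of this patch is a hand-off between two
threads. The migration job sets priv->signalStop and sleeps in
virDomainObjWait(); the STOP event handler updates the domain state and,
when the flag is set, wakes the job up with virDomainObjBroadcast(). The
stand-alone sketch below illustrates the same pattern outside of libvirt;
demo_domain, demo_domain_wait, and both thread functions are made-up
stand-ins for virDomainObjPtr, virDomainObjWait, and the real job/event
threads, with a plain pthread mutex and condition variable playing the
role of the object lock and domain condition:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Made-up stand-in for virDomainObjPtr: object lock, domain condition,
 * domain state, and the signalStop flag from qemuDomainObjPrivate. */
typedef struct {
    pthread_mutex_t lock;       /* virObjectLock/virObjectUnlock */
    pthread_cond_t cond;        /* the domain condition */
    bool running;               /* VIR_DOMAIN_RUNNING vs VIR_DOMAIN_PAUSED */
    bool signal_stop;           /* priv->signalStop */
} demo_domain;

/* Stand-in for virDomainObjWait: sleep on the domain condition with the
 * object lock held; the lock is dropped while waiting and reacquired
 * before returning. */
static void demo_domain_wait(demo_domain *dom)
{
    pthread_cond_wait(&dom->cond, &dom->lock);
}

/* The migration job: mirrors the new loop in qemuMigrationRun. */
static void *migration_thread(void *opaque)
{
    demo_domain *dom = opaque;

    pthread_mutex_lock(&dom->lock);
    while (dom->running) {
        dom->signal_stop = true;    /* ask the handler to wake us up */
        demo_domain_wait(dom);      /* loop also covers spurious wakeups */
        dom->signal_stop = false;
    }
    pthread_mutex_unlock(&dom->lock);
    printf("migration job: domain paused, finishing the job\n");
    return NULL;
}

/* The event handler: mirrors qemuProcessHandleStop. */
static void *stop_event_thread(void *opaque)
{
    demo_domain *dom = opaque;

    sleep(1);                       /* QEMU stops the CPUs at some point */
    pthread_mutex_lock(&dom->lock);
    dom->running = false;           /* virDomainObjSetState(..., PAUSED, ...) */
    if (dom->signal_stop)           /* a job is waiting for this event */
        pthread_cond_broadcast(&dom->cond);  /* virDomainObjBroadcast */
    pthread_mutex_unlock(&dom->lock);
    return NULL;
}

int main(void)
{
    demo_domain dom = {
        .lock = PTHREAD_MUTEX_INITIALIZER,
        .cond = PTHREAD_COND_INITIALIZER,
        .running = true,
        .signal_stop = false,
    };
    pthread_t job, handler;

    pthread_create(&job, NULL, migration_thread, &dom);
    pthread_create(&handler, NULL, stop_event_thread, &dom);
    pthread_join(job, NULL);
    pthread_join(handler, NULL);
    return 0;
}

Guarding the broadcast with the flag mirrors the patch: the event handler
only signals the condition when a migration job is actually waiting for
the STOP event, so unrelated STOP events don't wake anyone needlessly.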
src/qemu/qemu_domain.h | 2 ++
src/qemu/qemu_migration.c | 28 +++++++++++++++++-----------
src/qemu/qemu_process.c | 21 ++++++++++++++++-----
3 files changed, 35 insertions(+), 16 deletions(-)
diff --git a/src/qemu/qemu_domain.h b/src/qemu/qemu_domain.h
index 7fc4fff..9fd9076 100644
--- a/src/qemu/qemu_domain.h
+++ b/src/qemu/qemu_domain.h
@@ -203,6 +203,8 @@ struct _qemuDomainObjPrivate {
bool signalIOError; /* true if the domain condition should be signalled on
I/O error */
+ bool signalStop; /* true if the domain condition should be signalled on
+ QMP STOP event */
};
# define QEMU_DOMAIN_DISK_PRIVATE(disk) \
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index 51e7125..c927888 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -4563,19 +4563,25 @@ qemuMigrationRun(virQEMUDriverPtr driver,
else if (rc == -1)
goto cleanup;
- /* When migration completed, QEMU will have paused the
- * CPUs for us, but unless we're using the JSON monitor
- * we won't have been notified of this, so might still
- * think we're running. For v2 protocol this doesn't
- * matter because we'll kill the VM soon, but for v3
- * this is important because we stay paused until the
- * confirm3 step, but need to release the lock state
+ /* Once migration completes, QEMU will have paused the CPUs for us.
+ * Wait for the STOP event to be processed or explicitly stop CPUs
+ * (for old QEMU which does not send events) to release the lock state.
*/
- if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
- if (qemuMigrationSetOffline(driver, vm) < 0) {
- priv->job.current->type = VIR_DOMAIN_JOB_FAILED;
- goto cleanup;
+ if (priv->monJSON) {
+ while (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
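+ /* Ask qemuProcessHandleStop to broadcast the domain condition
+ * once the STOP event arrives */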
+ priv->signalStop = true;
+ rc = virDomainObjWait(vm);
+ priv->signalStop = false;
+ if (rc < 0) {
+ priv->job.current->type = VIR_DOMAIN_JOB_FAILED;
+ goto cleanup;
+ }
}
+ } else if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING &&
+ qemuMigrationSetOffline(driver, vm) < 0) {
+ priv->job.current->type = VIR_DOMAIN_JOB_FAILED;
+ goto cleanup;
}
ret = 0;
diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
index d465b4f..cab1aee 100644
--- a/src/qemu/qemu_process.c
+++ b/src/qemu/qemu_process.c
@@ -701,6 +701,8 @@ qemuProcessHandleStop(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
{
virQEMUDriverPtr driver = opaque;
virObjectEventPtr event = NULL;
+ virDomainPausedReason reason = VIR_DOMAIN_PAUSED_UNKNOWN;
+ virDomainEventSuspendedDetailType detail = VIR_DOMAIN_EVENT_SUSPENDED_PAUSED;
virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
virObjectLock(vm);
@@ -712,16 +714,26 @@
goto unlock;
}
- VIR_DEBUG("Transitioned guest %s to paused state",
- vm->def->name);
+ if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT) {
+ reason = VIR_DOMAIN_PAUSED_MIGRATION;
+ detail = VIR_DOMAIN_EVENT_SUSPENDED_MIGRATED;
+ }
+
+ VIR_DEBUG("Transitioned guest %s to paused state, reason %s",
+ vm->def->name, virDomainPausedReasonTypeToString(reason));
if (priv->job.current)
ignore_value(virTimeMillisNow(&priv->job.current->stopped));
- virDomainObjSetState(vm, VIR_DOMAIN_PAUSED, VIR_DOMAIN_PAUSED_UNKNOWN);
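+ /* A migration job may be blocked on the domain condition waiting
+ * for this STOP event; wake it up */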
+ if (priv->signalStop)
+ virDomainObjBroadcast(vm);
+
+ virDomainObjSetState(vm, VIR_DOMAIN_PAUSED, reason);
event = virDomainEventLifecycleNewFromObj(vm,
- VIR_DOMAIN_EVENT_SUSPENDED,
- VIR_DOMAIN_EVENT_SUSPENDED_PAUSED);
+ VIR_DOMAIN_EVENT_SUSPENDED,
+ detail);
VIR_FREE(priv->lockState);
if (virDomainLockProcessPause(driver->lockManager, vm,
&priv->lockState) < 0)
--
2.7.0