[libvirt] [PATCH v4 0/5] Add support for migration event

The final version of migration event patches which got accepted upstream had
to be slightly modified -- the event is only emitted when the "events"
migration capability is turned on (default is off). Thus a new patch had to
be added to libvirt. I'm resending all migration event patches for context
even though they haven't changed at all since they were ACKed.

Jiri Denemark (5):
  qemu_monitor: Wire up MIGRATION event
  qemu: Enable migration events on QMP monitor
  qemuDomainGetJobStatsInternal: Support migration events
  qemu: Update migration state according to MIGRATION event
  qemu: Wait for migration events on domain condition

 src/qemu/qemu_capabilities.c |  2 ++
 src/qemu/qemu_capabilities.h |  1 +
 src/qemu/qemu_domain.h       |  3 ++
 src/qemu/qemu_driver.c       | 27 ++++++++++++---
 src/qemu/qemu_migration.c    | 59 +++++++++++++++++++++++++++-----
 src/qemu/qemu_monitor.c      | 16 ++++++++-
 src/qemu/qemu_monitor.h      |  9 +++++
 src/qemu/qemu_monitor_json.c | 23 +++++++++++++
 src/qemu/qemu_process.c      | 80 +++++++++++++++++++++++++++++++++++++-------
 9 files changed, 195 insertions(+), 25 deletions(-)

--
2.4.5

Thanks to Juan's work QEMU finally emits an event whenever migration state
changes.

Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
---

Notes:
    This event is supported in QEMU 2.4-rc0

    ACKed in version 3

    Version 4:
    - no change

    Version 3:
    - rebased (context conflict in qemu_capabilities.[ch])

    Version 2:
    - new patch

 src/qemu/qemu_capabilities.c |  2 ++
 src/qemu/qemu_capabilities.h |  1 +
 src/qemu/qemu_monitor.c      | 14 ++++++++++++++
 src/qemu/qemu_monitor.h      |  8 ++++++++
 src/qemu/qemu_monitor_json.c | 23 +++++++++++++++++++++++
 5 files changed, 48 insertions(+)

diff --git a/src/qemu/qemu_capabilities.c b/src/qemu/qemu_capabilities.c
index 27686c3..2b33935 100644
--- a/src/qemu/qemu_capabilities.c
+++ b/src/qemu/qemu_capabilities.c
@@ -287,6 +287,7 @@ VIR_ENUM_IMPL(virQEMUCaps, QEMU_CAPS_LAST,
               "aarch64-off",
 
               "vhost-user-multiqueue", /* 190 */
+              "migration-event",
     );
 
 
@@ -1505,6 +1506,7 @@ struct virQEMUCapsStringFlags virQEMUCapsEvents[] = {
     { "BALLOON_CHANGE", QEMU_CAPS_BALLOON_EVENT },
     { "SPICE_MIGRATE_COMPLETED", QEMU_CAPS_SEAMLESS_MIGRATION },
     { "DEVICE_DELETED", QEMU_CAPS_DEVICE_DEL_EVENT },
+    { "MIGRATION", QEMU_CAPS_MIGRATION_EVENT },
 };
 
 struct virQEMUCapsStringFlags virQEMUCapsObjectTypes[] = {
diff --git a/src/qemu/qemu_capabilities.h b/src/qemu/qemu_capabilities.h
index 30aa504..f77bd06 100644
--- a/src/qemu/qemu_capabilities.h
+++ b/src/qemu/qemu_capabilities.h
@@ -230,6 +230,7 @@ typedef enum {
     QEMU_CAPS_DEVICE_PCI_SERIAL = 188, /* -device pci-serial */
     QEMU_CAPS_CPU_AARCH64_OFF = 189, /* -cpu ...,aarch64=off */
     QEMU_CAPS_VHOSTUSER_MULTIQUEUE = 190, /* vhost-user with -netdev queues= */
+    QEMU_CAPS_MIGRATION_EVENT = 191, /* MIGRATION event */
 
     QEMU_CAPS_LAST, /* this must always be the last item */
 } virQEMUCapsFlags;
diff --git a/src/qemu/qemu_monitor.c b/src/qemu/qemu_monitor.c
index 93fcc7f..fb325b6 100644
--- a/src/qemu/qemu_monitor.c
+++ b/src/qemu/qemu_monitor.c
@@ -1498,6 +1498,20 @@ qemuMonitorEmitSpiceMigrated(qemuMonitorPtr mon)
 
 
 int
+qemuMonitorEmitMigrationStatus(qemuMonitorPtr mon,
+                               int status)
+{
+    int ret = -1;
+    VIR_DEBUG("mon=%p, status=%s",
+              mon, NULLSTR(qemuMonitorMigrationStatusTypeToString(status)));
+
+    QEMU_MONITOR_CALLBACK(mon, ret, domainMigrationStatus, mon->vm, status);
+
+    return ret;
+}
+
+
+int
 qemuMonitorSetCapabilities(qemuMonitorPtr mon)
 {
     QEMU_CHECK_MONITOR(mon);
diff --git a/src/qemu/qemu_monitor.h b/src/qemu/qemu_monitor.h
index ec1724f..8555f7b 100644
--- a/src/qemu/qemu_monitor.h
+++ b/src/qemu/qemu_monitor.h
@@ -186,6 +186,11 @@ typedef int (*qemuMonitorDomainSpiceMigratedCallback)(qemuMonitorPtr mon,
                                                       virDomainObjPtr vm,
                                                       void *opaque);
 
+typedef int (*qemuMonitorDomainMigrationStatusCallback)(qemuMonitorPtr mon,
+                                                        virDomainObjPtr vm,
+                                                        int status,
+                                                        void *opaque);
+
 typedef struct _qemuMonitorCallbacks qemuMonitorCallbacks;
 typedef qemuMonitorCallbacks *qemuMonitorCallbacksPtr;
 struct _qemuMonitorCallbacks {
@@ -214,6 +219,7 @@ struct _qemuMonitorCallbacks {
     qemuMonitorDomainNicRxFilterChangedCallback domainNicRxFilterChanged;
     qemuMonitorDomainSerialChangeCallback domainSerialChange;
     qemuMonitorDomainSpiceMigratedCallback domainSpiceMigrated;
+    qemuMonitorDomainMigrationStatusCallback domainMigrationStatus;
 };
 
 char *qemuMonitorEscapeArg(const char *in);
@@ -313,6 +319,8 @@ int qemuMonitorEmitSerialChange(qemuMonitorPtr mon,
                                 const char *devAlias,
                                 bool connected);
 int qemuMonitorEmitSpiceMigrated(qemuMonitorPtr mon);
+int qemuMonitorEmitMigrationStatus(qemuMonitorPtr mon,
+                                   int status);
 
 int qemuMonitorStartCPUs(qemuMonitorPtr mon,
                          virConnectPtr conn);
diff --git a/src/qemu/qemu_monitor_json.c b/src/qemu/qemu_monitor_json.c
index f0cfc57..cc4c92b 100644
--- a/src/qemu/qemu_monitor_json.c
+++ b/src/qemu/qemu_monitor_json.c
@@ -84,6 +84,7 @@ static void qemuMonitorJSONHandleDeviceDeleted(qemuMonitorPtr mon, virJSONValueP
 static void qemuMonitorJSONHandleNicRxFilterChanged(qemuMonitorPtr mon, virJSONValuePtr data);
 static void qemuMonitorJSONHandleSerialChange(qemuMonitorPtr mon, virJSONValuePtr data);
 static void qemuMonitorJSONHandleSpiceMigrated(qemuMonitorPtr mon, virJSONValuePtr data);
+static void qemuMonitorJSONHandleMigrationStatus(qemuMonitorPtr mon, virJSONValuePtr data);
 
 typedef struct {
     const char *type;
@@ -99,6 +100,7 @@ static qemuEventHandler eventHandlers[] = {
     { "DEVICE_DELETED", qemuMonitorJSONHandleDeviceDeleted, },
     { "DEVICE_TRAY_MOVED", qemuMonitorJSONHandleTrayChange, },
     { "GUEST_PANICKED", qemuMonitorJSONHandleGuestPanic, },
+    { "MIGRATION", qemuMonitorJSONHandleMigrationStatus, },
     { "NIC_RX_FILTER_CHANGED", qemuMonitorJSONHandleNicRxFilterChanged, },
     { "POWERDOWN", qemuMonitorJSONHandlePowerdown, },
     { "RESET", qemuMonitorJSONHandleReset, },
@@ -984,6 +986,27 @@ qemuMonitorJSONHandleSpiceMigrated(qemuMonitorPtr mon,
 }
 
 
+static void
+qemuMonitorJSONHandleMigrationStatus(qemuMonitorPtr mon,
+                                     virJSONValuePtr data)
+{
+    const char *str;
+    int status;
+
+    if (!(str = virJSONValueObjectGetString(data, "status"))) {
+        VIR_WARN("missing status in migration event");
+        return;
+    }
+
+    if ((status = qemuMonitorMigrationStatusTypeFromString(str)) == -1) {
+        VIR_WARN("unknown status '%s' in migration event", str);
+        return;
+    }
+
+    qemuMonitorEmitMigrationStatus(mon, status);
+}
+
+
 int
 qemuMonitorJSONHumanCommandWithFd(qemuMonitorPtr mon,
                                   const char *cmd_str,
--
2.4.5
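For readers who do not follow the monitor callback plumbing: the JSON handler
above only reads the "status" field of the event and forwards the translated
enum value to whatever domainMigrationStatus callback is registered. The
sketch below is a hypothetical consumer of that callback; exampleMigrationStatusCb
is an invented name, the event layout in the comment is an assumption about
QEMU's wire format, and the real consumer is added later in this series in
qemu_process.c. It would only build inside the libvirt tree against the
internal qemu_monitor.h header and logging macros.

    /* A minimal sketch, not part of the patch.  It matches the
     * qemuMonitorDomainMigrationStatusCallback typedef added above.
     *
     * The incoming QMP event is roughly (timestamp values illustrative):
     *   {"event": "MIGRATION",
     *    "timestamp": {"seconds": 1436373578, "microseconds": 123456},
     *    "data": {"status": "completed"}}
     */
    static int
    exampleMigrationStatusCb(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
                             virDomainObjPtr vm,
                             int status,
                             void *opaque ATTRIBUTE_UNUSED)
    {
        /* 'status' was already converted from the event's status string by
         * qemuMonitorMigrationStatusTypeFromString() in the JSON handler. */
        VIR_DEBUG("vm=%p migration status=%s",
                  vm, NULLSTR(qemuMonitorMigrationStatusTypeToString(status)));
        return 0;
    }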

Even if QEMU supports migration events it doesn't send them by default. We
have to enable them by calling migrate-set-capabilities. Let's enable
migration events every time we can and clear QEMU_CAPS_MIGRATION_EVENT in
case migrate-set-capabilities does not support events.

Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
---

Notes:
    Version 4:
    - new patch

 src/qemu/qemu_monitor.c |  2 +-
 src/qemu/qemu_monitor.h |  1 +
 src/qemu/qemu_process.c | 46 ++++++++++++++++++++++++++++++++++------------
 3 files changed, 36 insertions(+), 13 deletions(-)

diff --git a/src/qemu/qemu_monitor.c b/src/qemu/qemu_monitor.c
index fb325b6..54695c2 100644
--- a/src/qemu/qemu_monitor.c
+++ b/src/qemu/qemu_monitor.c
@@ -163,7 +163,7 @@ VIR_ENUM_IMPL(qemuMonitorMigrationStatus,
 
 VIR_ENUM_IMPL(qemuMonitorMigrationCaps,
               QEMU_MONITOR_MIGRATION_CAPS_LAST,
-              "xbzrle", "auto-converge", "rdma-pin-all")
+              "xbzrle", "auto-converge", "rdma-pin-all", "events")
 
 VIR_ENUM_IMPL(qemuMonitorVMStatus,
               QEMU_MONITOR_VM_STATUS_LAST,
diff --git a/src/qemu/qemu_monitor.h b/src/qemu/qemu_monitor.h
index 8555f7b..ab7d5a7 100644
--- a/src/qemu/qemu_monitor.h
+++ b/src/qemu/qemu_monitor.h
@@ -512,6 +512,7 @@ typedef enum {
     QEMU_MONITOR_MIGRATION_CAPS_XBZRLE,
     QEMU_MONITOR_MIGRATION_CAPS_AUTO_CONVERGE,
     QEMU_MONITOR_MIGRATION_CAPS_RDMA_PIN_ALL,
+    QEMU_MONITOR_MIGRATION_CAPS_EVENTS,
 
     QEMU_MONITOR_MIGRATION_CAPS_LAST
 } qemuMonitorMigrationCaps;
diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
index ba84182..04e7e93 100644
--- a/src/qemu/qemu_process.c
+++ b/src/qemu/qemu_process.c
@@ -1546,7 +1546,7 @@ qemuConnectMonitor(virQEMUDriverPtr driver, virDomainObjPtr vm, int asyncJob,
                                           vm->def) < 0) {
         VIR_ERROR(_("Failed to set security context for monitor for %s"),
                   vm->def->name);
-        goto error;
+        return -1;
     }
 
     /* Hold an extra reference because we can't allow 'vm' to be
@@ -1578,26 +1578,48 @@ qemuConnectMonitor(virQEMUDriverPtr driver, virDomainObjPtr vm, int asyncJob,
     if (virSecurityManagerClearSocketLabel(driver->securityManager,
                                            vm->def) < 0) {
         VIR_ERROR(_("Failed to clear security context for monitor for %s"),
                   vm->def->name);
-        goto error;
+        return -1;
     }
 
     if (priv->mon == NULL) {
         VIR_INFO("Failed to connect monitor for %s", vm->def->name);
-        goto error;
+        return -1;
     }
 
     if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
-        goto error;
-    ret = qemuMonitorSetCapabilities(priv->mon);
-    if (ret == 0 &&
-        virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MONITOR_JSON))
-        ret = virQEMUCapsProbeQMP(priv->qemuCaps, priv->mon);
+        return -1;
+
+    if (qemuMonitorSetCapabilities(priv->mon) < 0)
+        goto cleanup;
+
+    if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MONITOR_JSON) &&
+        virQEMUCapsProbeQMP(priv->qemuCaps, priv->mon) < 0)
+        goto cleanup;
+
+    if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_EVENT)) {
+        int rv;
+
+        rv = qemuMonitorGetMigrationCapability(
+                priv->mon, QEMU_MONITOR_MIGRATION_CAPS_EVENTS);
+        if (rv < 0) {
+            goto cleanup;
+        } else if (rv == 0) {
+            VIR_DEBUG("Cannot enable migration events; clearing capability");
+            virQEMUCapsClear(priv->qemuCaps, QEMU_CAPS_MIGRATION_EVENT);
+        } else if (qemuMonitorSetMigrationCapability(
+                        priv->mon,
+                        QEMU_MONITOR_MIGRATION_CAPS_EVENTS,
+                        true) < 0) {
+            goto cleanup;
+        }
+    }
+
+    ret = 0;
+
+ cleanup:
     if (qemuDomainObjExitMonitor(driver, vm) < 0)
-        return -1;
-
- error:
-
+        ret = -1;
     return ret;
 }
--
2.4.5

On Wed, Jul 08, 2015 at 18:39:38 +0200, Jiri Denemark wrote:
> Even if QEMU supports migration events it doesn't send them by default. We
> have to enable them by calling migrate-set-capabilities. Let's enable
> migration events every time we can and clear QEMU_CAPS_MIGRATION_EVENT in
> case migrate-set-capabilities does not support events.
> 
> Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
> ---
> 
> Notes:
>     Version 4:
>     - new patch
> 
>  src/qemu/qemu_monitor.c |  2 +-
>  src/qemu/qemu_monitor.h |  1 +
>  src/qemu/qemu_process.c | 46 ++++++++++++++++++++++++++++++++++------------
>  3 files changed, 36 insertions(+), 13 deletions(-)
> 
> diff --git a/src/qemu/qemu_monitor.c b/src/qemu/qemu_monitor.c
> index fb325b6..54695c2 100644
> --- a/src/qemu/qemu_monitor.c
> +++ b/src/qemu/qemu_monitor.c
> @@ -163,7 +163,7 @@ VIR_ENUM_IMPL(qemuMonitorMigrationStatus,
>  
>  VIR_ENUM_IMPL(qemuMonitorMigrationCaps,
>                QEMU_MONITOR_MIGRATION_CAPS_LAST,
> -              "xbzrle", "auto-converge", "rdma-pin-all")
> +              "xbzrle", "auto-converge", "rdma-pin-all", "events")
>  
>  VIR_ENUM_IMPL(qemuMonitorVMStatus,
>                QEMU_MONITOR_VM_STATUS_LAST,
> diff --git a/src/qemu/qemu_monitor.h b/src/qemu/qemu_monitor.h
> index 8555f7b..ab7d5a7 100644
> --- a/src/qemu/qemu_monitor.h
> +++ b/src/qemu/qemu_monitor.h
> @@ -512,6 +512,7 @@ typedef enum {
>      QEMU_MONITOR_MIGRATION_CAPS_XBZRLE,
>      QEMU_MONITOR_MIGRATION_CAPS_AUTO_CONVERGE,
>      QEMU_MONITOR_MIGRATION_CAPS_RDMA_PIN_ALL,
> +    QEMU_MONITOR_MIGRATION_CAPS_EVENTS,
>  
>      QEMU_MONITOR_MIGRATION_CAPS_LAST
>  } qemuMonitorMigrationCaps;
> diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
> index ba84182..04e7e93 100644
> --- a/src/qemu/qemu_process.c
> +++ b/src/qemu/qemu_process.c
> @@ -1546,7 +1546,7 @@ qemuConnectMonitor(virQEMUDriverPtr driver, virDomainObjPtr vm, int asyncJob,
>                                            vm->def) < 0) {
>          VIR_ERROR(_("Failed to set security context for monitor for %s"),
>                    vm->def->name);
> -        goto error;
> +        return -1;
>      }
>  
>      /* Hold an extra reference because we can't allow 'vm' to be
> @@ -1578,26 +1578,48 @@ qemuConnectMonitor(virQEMUDriverPtr driver, virDomainObjPtr vm, int asyncJob,
>      if (virSecurityManagerClearSocketLabel(driver->securityManager,
>                                             vm->def) < 0) {
>          VIR_ERROR(_("Failed to clear security context for monitor for %s"),
>                    vm->def->name);
> -        goto error;
> +        return -1;
>      }
>  
>      if (priv->mon == NULL) {
>          VIR_INFO("Failed to connect monitor for %s", vm->def->name);
> -        goto error;
> +        return -1;
>      }
>  
>      if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
> -        goto error;
> -    ret = qemuMonitorSetCapabilities(priv->mon);
> -    if (ret == 0 &&
> -        virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MONITOR_JSON))
> -        ret = virQEMUCapsProbeQMP(priv->qemuCaps, priv->mon);
> +        return -1;
> +
> +    if (qemuMonitorSetCapabilities(priv->mon) < 0)
> +        goto cleanup;
> +
> +    if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MONITOR_JSON) &&
> +        virQEMUCapsProbeQMP(priv->qemuCaps, priv->mon) < 0)
> +        goto cleanup;
> +
> +    if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_EVENT)) {
> +        int rv;
> +
> +        rv = qemuMonitorGetMigrationCapability(
> +                priv->mon, QEMU_MONITOR_MIGRATION_CAPS_EVENTS);
Is it actually worth querying for the capability? Wouldn't it be sufficient to just set it and if it fails, clear the capability bit right away?
> +        if (rv < 0) {
> +            goto cleanup;
> +        } else if (rv == 0) {
> +            VIR_DEBUG("Cannot enable migration events; clearing capability");
> +            virQEMUCapsClear(priv->qemuCaps, QEMU_CAPS_MIGRATION_EVENT);
> +        } else if (qemuMonitorSetMigrationCapability(
> +                        priv->mon,
> +                        QEMU_MONITOR_MIGRATION_CAPS_EVENTS,
> +                        true) < 0) {
Hmm, this formatting looks rather ugly to me. Do we still need to do this silly 80 column limit in the age of wide angle displays?
> +            goto cleanup;
> +        }
> +    }
> +
> +    ret = 0;
> +
> + cleanup:
>      if (qemuDomainObjExitMonitor(driver, vm) < 0)
> -        return -1;
> -
> - error:
> -
> +        ret = -1;
>      return ret;
Peter

Even if QEMU supports migration events it doesn't send them by default. We
have to enable them by calling migrate-set-capabilities. Let's enable
migration events every time we can and clear QEMU_CAPS_MIGRATION_EVENT in
case migrate-set-capabilities does not support events.

Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
---

Notes:
    Version 5:
    - simplified

    Version 4:
    - new patch

 src/qemu/qemu_monitor.c |  2 +-
 src/qemu/qemu_monitor.h |  1 +
 src/qemu/qemu_process.c | 36 ++++++++++++++++++++++++------------
 3 files changed, 26 insertions(+), 13 deletions(-)

diff --git a/src/qemu/qemu_monitor.c b/src/qemu/qemu_monitor.c
index fb325b6..54695c2 100644
--- a/src/qemu/qemu_monitor.c
+++ b/src/qemu/qemu_monitor.c
@@ -163,7 +163,7 @@ VIR_ENUM_IMPL(qemuMonitorMigrationStatus,
 
 VIR_ENUM_IMPL(qemuMonitorMigrationCaps,
               QEMU_MONITOR_MIGRATION_CAPS_LAST,
-              "xbzrle", "auto-converge", "rdma-pin-all")
+              "xbzrle", "auto-converge", "rdma-pin-all", "events")
 
 VIR_ENUM_IMPL(qemuMonitorVMStatus,
               QEMU_MONITOR_VM_STATUS_LAST,
diff --git a/src/qemu/qemu_monitor.h b/src/qemu/qemu_monitor.h
index 8555f7b..ab7d5a7 100644
--- a/src/qemu/qemu_monitor.h
+++ b/src/qemu/qemu_monitor.h
@@ -512,6 +512,7 @@ typedef enum {
     QEMU_MONITOR_MIGRATION_CAPS_XBZRLE,
     QEMU_MONITOR_MIGRATION_CAPS_AUTO_CONVERGE,
     QEMU_MONITOR_MIGRATION_CAPS_RDMA_PIN_ALL,
+    QEMU_MONITOR_MIGRATION_CAPS_EVENTS,
 
     QEMU_MONITOR_MIGRATION_CAPS_LAST
 } qemuMonitorMigrationCaps;
diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
index ba84182..1d223d3 100644
--- a/src/qemu/qemu_process.c
+++ b/src/qemu/qemu_process.c
@@ -1546,7 +1546,7 @@ qemuConnectMonitor(virQEMUDriverPtr driver, virDomainObjPtr vm, int asyncJob,
                                           vm->def) < 0) {
         VIR_ERROR(_("Failed to set security context for monitor for %s"),
                   vm->def->name);
-        goto error;
+        return -1;
     }
 
     /* Hold an extra reference because we can't allow 'vm' to be
@@ -1578,26 +1578,38 @@ qemuConnectMonitor(virQEMUDriverPtr driver, virDomainObjPtr vm, int asyncJob,
     if (virSecurityManagerClearSocketLabel(driver->securityManager,
                                            vm->def) < 0) {
         VIR_ERROR(_("Failed to clear security context for monitor for %s"),
                   vm->def->name);
-        goto error;
+        return -1;
     }
 
     if (priv->mon == NULL) {
         VIR_INFO("Failed to connect monitor for %s", vm->def->name);
-        goto error;
+        return -1;
     }
 
     if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
-        goto error;
-    ret = qemuMonitorSetCapabilities(priv->mon);
-    if (ret == 0 &&
-        virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MONITOR_JSON))
-        ret = virQEMUCapsProbeQMP(priv->qemuCaps, priv->mon);
+        return -1;
+
+    if (qemuMonitorSetCapabilities(priv->mon) < 0)
+        goto cleanup;
+
+    if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MONITOR_JSON) &&
+        virQEMUCapsProbeQMP(priv->qemuCaps, priv->mon) < 0)
+        goto cleanup;
+
+    if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_EVENT) &&
+        qemuMonitorSetMigrationCapability(priv->mon,
+                                          QEMU_MONITOR_MIGRATION_CAPS_EVENTS,
+                                          true) < 0) {
+        VIR_DEBUG("Cannot enable migration events; clearing capability");
+        virQEMUCapsClear(priv->qemuCaps, QEMU_CAPS_MIGRATION_EVENT);
+    }
+
+    ret = 0;
+
+ cleanup:
     if (qemuDomainObjExitMonitor(driver, vm) < 0)
-        return -1;
-
- error:
-
+        ret = -1;
     return ret;
 }
--
2.4.5
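For context on what this change does on the wire: qemuMonitorSetMigrationCapability()
issues a single migrate-set-capabilities command, and if that fails the
capability bit is dropped again so the rest of the driver falls back to
polling. The helper below is a hypothetical wrapper sketching that behaviour;
the QMP exchange shown in the comment is an assumption based on the usual
migrate-set-capabilities syntax, not something taken from this patch.

    /* Sketch only: the function name is invented for illustration.
     *
     * The underlying monitor traffic is roughly:
     *   -> { "execute": "migrate-set-capabilities",
     *        "arguments": { "capabilities":
     *            [ { "capability": "events", "state": true } ] } }
     *   <- { "return": {} }
     */
    static void
    exampleEnableMigrationEvents(virDomainObjPtr vm)
    {
        qemuDomainObjPrivatePtr priv = vm->privateData;

        if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_EVENT) &&
            qemuMonitorSetMigrationCapability(priv->mon,
                                              QEMU_MONITOR_MIGRATION_CAPS_EVENTS,
                                              true) < 0) {
            /* Older QEMU or a monitor error: remember that events are off so
             * later code keeps polling query-migrate instead. */
            virQEMUCapsClear(priv->qemuCaps, QEMU_CAPS_MIGRATION_EVENT);
        }
    }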

On Thu, Jul 09, 2015 at 09:11:16 +0200, Jiri Denemark wrote:
> Even if QEMU supports migration events it doesn't send them by default. We
> have to enable them by calling migrate-set-capabilities. Let's enable
> migration events every time we can and clear QEMU_CAPS_MIGRATION_EVENT in
> case migrate-set-capabilities does not support events.
> 
> Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
> ---
> 
> Notes:
>     Version 5:
>     - simplified
> 
>     Version 4:
>     - new patch
> 
>  src/qemu/qemu_monitor.c |  2 +-
>  src/qemu/qemu_monitor.h |  1 +
>  src/qemu/qemu_process.c | 36 ++++++++++++++++++++++++------------
>  3 files changed, 26 insertions(+), 13 deletions(-)
ACK, Peter

When QEMU supports migration events the qemuDomainJobInfo structure will no
longer be updated with migration statistics. We have to enter a job and
explicitly ask QEMU every time virDomainGetJob{Info,Stats} is called.

Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
---

Notes:
    ACKed in version 3

    Version 4:
    - no change

    Version 3:
    - moved before "qemu: Update migration state according to MIGRATION event"

    Version 2:
    - new patch

 src/qemu/qemu_driver.c | 27 +++++++++++++++++++++++----
 1 file changed, 23 insertions(+), 4 deletions(-)

diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 900740e..229ccde 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -12939,15 +12939,27 @@ qemuConnectBaselineCPU(virConnectPtr conn ATTRIBUTE_UNUSED,
 
 
 static int
-qemuDomainGetJobStatsInternal(virQEMUDriverPtr driver ATTRIBUTE_UNUSED,
+qemuDomainGetJobStatsInternal(virQEMUDriverPtr driver,
                               virDomainObjPtr vm,
                               bool completed,
                               qemuDomainJobInfoPtr jobInfo)
 {
     qemuDomainObjPrivatePtr priv = vm->privateData;
     qemuDomainJobInfoPtr info;
+    bool fetch = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_EVENT);
     int ret = -1;
 
+    if (completed)
+        fetch = false;
+
+    /* Do not ask QEMU if migration is not even running yet */
+    if (!priv->job.current || !priv->job.current->status.status)
+        fetch = false;
+
+    if (fetch &&
+        qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
+        return -1;
+
     if (!completed &&
         !virDomainObjIsActive(vm)) {
         virReportError(VIR_ERR_OPERATION_INVALID, "%s",
@@ -12968,12 +12980,19 @@ qemuDomainGetJobStatsInternal(virQEMUDriverPtr driver ATTRIBUTE_UNUSED,
     *jobInfo = *info;
 
     if (jobInfo->type == VIR_DOMAIN_JOB_BOUNDED ||
-        jobInfo->type == VIR_DOMAIN_JOB_UNBOUNDED)
-        ret = qemuDomainJobInfoUpdateTime(jobInfo);
-    else
+        jobInfo->type == VIR_DOMAIN_JOB_UNBOUNDED) {
+        if (fetch)
+            ret = qemuMigrationFetchJobStatus(driver, vm, QEMU_ASYNC_JOB_NONE,
+                                              jobInfo);
+        else
+            ret = qemuDomainJobInfoUpdateTime(jobInfo);
+    } else {
         ret = 0;
+    }
 
  cleanup:
+    if (fetch)
+        qemuDomainObjEndJob(driver, vm);
     return ret;
 }
--
2.4.5
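From a client's point of view the API is unchanged; what changes is where the
numbers come from, since with an event-capable QEMU the statistics are fetched
from QEMU on demand instead of being copied from data cached by the 50ms
polling loop. A minimal client-side sketch using the public API follows; the
connection URI and the domain name "guest" are placeholders.

    #include <stdio.h>
    #include <libvirt/libvirt.h>

    /* Minimal sketch: ask libvirt for the current migration job statistics.
     * With these patches and an event-capable QEMU this triggers a
     * query-migrate under a fresh QEMU_JOB_QUERY job rather than returning
     * periodically cached data. */
    int main(void)
    {
        virConnectPtr conn = virConnectOpen("qemu:///system");
        virDomainPtr dom = conn ? virDomainLookupByName(conn, "guest") : NULL;
        virTypedParameterPtr params = NULL;
        int nparams = 0;
        int type;

        if (dom && virDomainGetJobStats(dom, &type, &params, &nparams, 0) == 0) {
            printf("job type %d, %d statistics fields\n", type, nparams);
            virTypedParamsFree(params, nparams);
        }

        if (dom)
            virDomainFree(dom);
        if (conn)
            virConnectClose(conn);
        return 0;
    }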

We don't need to call query-migrate every 50ms when we get the current
migration state via MIGRATION event.

Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
---

Notes:
    ACKed in version 2

    Version 4:
    - no change

    Version 3:
    - no change

    Version 2:
    - new patch

 src/qemu/qemu_migration.c | 14 ++++++++++++--
 src/qemu/qemu_process.c   | 31 +++++++++++++++++++++++++++++++
 2 files changed, 43 insertions(+), 2 deletions(-)

diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index a57a177..9a50923 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -2552,7 +2552,11 @@ qemuMigrationCheckJobStatus(virQEMUDriverPtr driver,
     qemuDomainObjPrivatePtr priv = vm->privateData;
     qemuDomainJobInfoPtr jobInfo = priv->job.current;
-
-    if (qemuMigrationUpdateJobStatus(driver, vm, asyncJob) < 0)
+    bool events = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_EVENT);
+
+    if (events)
+        qemuMigrationUpdateJobType(jobInfo);
+    else if (qemuMigrationUpdateJobStatus(driver, vm, asyncJob) < 0)
         return -1;
 
     switch (jobInfo->type) {
@@ -2571,9 +2575,15 @@ qemuMigrationCheckJobStatus(virQEMUDriverPtr driver,
                        qemuMigrationJobName(vm), _("canceled by client"));
         return -1;
 
+    case VIR_DOMAIN_JOB_COMPLETED:
+        /* Fetch statistics of a completed migration */
+        if (events &&
+            qemuMigrationUpdateJobStatus(driver, vm, asyncJob) < 0)
+            return -1;
+        break;
+
     case VIR_DOMAIN_JOB_BOUNDED:
     case VIR_DOMAIN_JOB_UNBOUNDED:
-    case VIR_DOMAIN_JOB_COMPLETED:
     case VIR_DOMAIN_JOB_LAST:
         break;
     }
diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
index 04e7e93..2cd6a52 100644
--- a/src/qemu/qemu_process.c
+++ b/src/qemu/qemu_process.c
@@ -1508,6 +1508,36 @@ qemuProcessHandleSpiceMigrated(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
 }
 
 
+static int
+qemuProcessHandleMigrationStatus(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
+                                 virDomainObjPtr vm,
+                                 int status,
+                                 void *opaque ATTRIBUTE_UNUSED)
+{
+    qemuDomainObjPrivatePtr priv;
+
+    virObjectLock(vm);
+
+    VIR_DEBUG("Migration of domain %p %s changed state to %s",
+              vm, vm->def->name,
+              qemuMonitorMigrationStatusTypeToString(status));
+
+    priv = vm->privateData;
+    if (priv->job.asyncJob != QEMU_ASYNC_JOB_MIGRATION_OUT &&
+        priv->job.asyncJob != QEMU_ASYNC_JOB_MIGRATION_IN) {
+        VIR_DEBUG("got MIGRATION event without a migration job");
+        goto cleanup;
+    }
+
+    priv->job.current->status.status = status;
+    virDomainObjSignal(vm);
+
+ cleanup:
+    virObjectUnlock(vm);
+    return 0;
+}
+
+
 static qemuMonitorCallbacks monitorCallbacks = {
     .eofNotify = qemuProcessHandleMonitorEOF,
     .errorNotify = qemuProcessHandleMonitorError,
@@ -1532,6 +1562,7 @@ static qemuMonitorCallbacks monitorCallbacks = {
     .domainNicRxFilterChanged = qemuProcessHandleNicRxFilterChanged,
     .domainSerialChange = qemuProcessHandleSerialChanged,
     .domainSpiceMigrated = qemuProcessHandleSpiceMigrated,
+    .domainMigrationStatus = qemuProcessHandleMigrationStatus,
 };
 
 static int
--
2.4.5
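The jobInfo->type values tested in the switch above are produced by
qemuMigrationUpdateJobType(), an existing helper that is not part of this
diff. Purely as an illustration of the translation involved, here is an
approximate mapping; it is not the actual libvirt implementation, and the
QEMU_MONITOR_MIGRATION_STATUS_* constant names are assumed from qemu_monitor.h.

    /* Illustrative sketch only -- approximate translation from the
     * monitor-level status stored by qemuProcessHandleMigrationStatus()
     * into the job type reported to callers. */
    static void
    exampleUpdateJobType(qemuDomainJobInfoPtr jobInfo)
    {
        switch (jobInfo->status.status) {
        case QEMU_MONITOR_MIGRATION_STATUS_COMPLETED:
            jobInfo->type = VIR_DOMAIN_JOB_COMPLETED;
            break;
        case QEMU_MONITOR_MIGRATION_STATUS_ERROR:
            jobInfo->type = VIR_DOMAIN_JOB_FAILED;
            break;
        case QEMU_MONITOR_MIGRATION_STATUS_CANCELLED:
            jobInfo->type = VIR_DOMAIN_JOB_CANCELLED;
            break;
        default:
            /* setup/active: keep VIR_DOMAIN_JOB_UNBOUNDED */
            break;
        }
    }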

Since we already support the MIGRATION event, we just need to make sure the
domain condition is signalled whenever a p2p connection drops or the domain
is paused due to an I/O error, and we can avoid waking up every 50 ms to
check whether something happened.

Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
---

Notes:
    ACKed in version 2

    Version 4:
    - no change

    Version 3:
    - no change

    Version 2:
    - new patch

 src/qemu/qemu_domain.h    |  3 +++
 src/qemu/qemu_migration.c | 45 +++++++++++++++++++++++++++++++++++++++------
 src/qemu/qemu_process.c   |  3 +++
 3 files changed, 45 insertions(+), 6 deletions(-)

diff --git a/src/qemu/qemu_domain.h b/src/qemu/qemu_domain.h
index 66dbcf5..2af7c59 100644
--- a/src/qemu/qemu_domain.h
+++ b/src/qemu/qemu_domain.h
@@ -199,6 +199,9 @@ struct _qemuDomainObjPrivate {
     /* Bitmaps below hold data from the auto NUMA feature */
     virBitmapPtr autoNodeset;
     virBitmapPtr autoCpuset;
+
+    bool signalIOError; /* true if the domain condition should be signalled on
+                           I/O error */
 };
 
 # define QEMU_DOMAIN_DISK_PRIVATE(disk) \
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index 9a50923..fa8c7d2 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -2661,20 +2661,28 @@ qemuMigrationWaitForCompletion(virQEMUDriverPtr driver,
 {
     qemuDomainObjPrivatePtr priv = vm->privateData;
     qemuDomainJobInfoPtr jobInfo = priv->job.current;
+    bool events = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_EVENT);
     int rv;
 
     jobInfo->type = VIR_DOMAIN_JOB_UNBOUNDED;
     while ((rv = qemuMigrationCompleted(driver, vm, asyncJob, dconn,
                                         abort_on_error, storage)) != 1) {
-        /* Poll every 50ms for progress & to allow cancellation */
-        struct timespec ts = { .tv_sec = 0, .tv_nsec = 50 * 1000 * 1000ull };
-
         if (rv < 0)
             return rv;
 
-        virObjectUnlock(vm);
-        nanosleep(&ts, NULL);
-        virObjectLock(vm);
+        if (events) {
+            if (virDomainObjWait(vm) < 0) {
+                jobInfo->type = VIR_DOMAIN_JOB_FAILED;
+                return -2;
+            }
+        } else {
+            /* Poll every 50ms for progress & to allow cancellation */
+            struct timespec ts = { .tv_sec = 0, .tv_nsec = 50 * 1000 * 1000ull };
+
+            virObjectUnlock(vm);
+            nanosleep(&ts, NULL);
+            virObjectLock(vm);
+        }
     }
 
     qemuDomainJobInfoUpdateDowntime(jobInfo);
@@ -4148,6 +4156,7 @@ qemuMigrationRun(virQEMUDriverPtr driver,
     virErrorPtr orig_err = NULL;
     unsigned int cookieFlags = 0;
     bool abort_on_error = !!(flags & VIR_MIGRATE_ABORT_ON_ERROR);
+    bool events = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_EVENT);
     int rc;
 
     VIR_DEBUG("driver=%p, vm=%p, cookiein=%s, cookieinlen=%d, "
@@ -4178,6 +4187,9 @@ qemuMigrationRun(virQEMUDriverPtr driver,
         return -1;
     }
 
+    if (events)
+        priv->signalIOError = abort_on_error;
+
     mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen,
                                  cookieFlags | QEMU_MIGRATION_COOKIE_GRAPHICS);
     if (!mig)
@@ -4387,6 +4399,9 @@ qemuMigrationRun(virQEMUDriverPtr driver,
 
     qemuMigrationCookieFree(mig);
 
+    if (events)
+        priv->signalIOError = false;
+
     if (orig_err) {
         virSetError(orig_err);
         virFreeError(orig_err);
@@ -5029,6 +5044,18 @@ doPeer2PeerMigrate3(virQEMUDriverPtr driver,
 }
 
 
+static void
+qemuMigrationConnectionClosed(virConnectPtr conn,
+                              int reason,
+                              void *opaque)
+{
+    virDomainObjPtr vm = opaque;
+
+    VIR_DEBUG("conn=%p, reason=%d, vm=%s", conn, reason, vm->def->name);
+    virDomainObjSignal(vm);
+}
+
+
 static int virConnectCredType[] = {
     VIR_CRED_AUTHNAME,
     VIR_CRED_PASSPHRASE,
@@ -5104,6 +5131,11 @@ static int doPeer2PeerMigrate(virQEMUDriverPtr driver,
                               cfg->keepAliveCount) < 0)
         goto cleanup;
 
+    if (virConnectRegisterCloseCallback(dconn, qemuMigrationConnectionClosed,
+                                        vm, NULL) < 0) {
+        goto cleanup;
+    }
+
     qemuDomainObjEnterRemote(vm);
     p2p = VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn,
                                    VIR_DRV_FEATURE_MIGRATION_P2P);
@@ -5169,6 +5201,7 @@ static int doPeer2PeerMigrate(virQEMUDriverPtr driver,
  cleanup:
     orig_err = virSaveLastError();
     qemuDomainObjEnterRemote(vm);
+    virConnectUnregisterCloseCallback(dconn, qemuMigrationConnectionClosed);
     virObjectUnref(dconn);
     qemuDomainObjExitRemote(vm);
     if (orig_err) {
diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
index 2cd6a52..db44a44 100644
--- a/src/qemu/qemu_process.c
+++ b/src/qemu/qemu_process.c
@@ -952,6 +952,9 @@ qemuProcessHandleIOError(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
         qemuDomainObjPrivatePtr priv = vm->privateData;
         VIR_DEBUG("Transitioned guest %s to paused state due to IO error",
                   vm->def->name);
+        if (priv->signalIOError)
+            virDomainObjSignal(vm);
+
         virDomainObjSetState(vm, VIR_DOMAIN_PAUSED, VIR_DOMAIN_PAUSED_IOERROR);
         lifecycleEvent = virDomainEventLifecycleNewFromObj(vm,
                                                            VIR_DOMAIN_EVENT_SUSPENDED,
--
2.4.5
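For readers unfamiliar with the domain condition helpers used above:
virDomainObjWait() and virDomainObjSignal() behave like a condition variable
attached to the domain object, with the MIGRATION event handler, the I/O
error handler and the new connection-close callback acting as the wakers.
A plain-pthreads analogue of the pattern, with generic names rather than
libvirt code:

    #include <pthread.h>
    #include <stdbool.h>

    /* Generic sketch of the wait/signal pattern this patch introduces. */
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static bool state_changed;

    /* Migration loop: sleeps instead of waking up every 50 ms. */
    static void wait_for_change(void)
    {
        pthread_mutex_lock(&lock);
        while (!state_changed)
            pthread_cond_wait(&cond, &lock);
        state_changed = false;
        pthread_mutex_unlock(&lock);
    }

    /* MIGRATION event, I/O error, or p2p connection close. */
    static void report_change(void)
    {
        pthread_mutex_lock(&lock);
        state_changed = true;
        pthread_cond_broadcast(&cond);   /* cf. the virDomainObjBroadcast note below */
        pthread_mutex_unlock(&lock);
    }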

On Wed, Jul 08, 2015 at 18:39:36 +0200, Jiri Denemark wrote:
> The final version of migration event patches which got accepted upstream had
> to be slightly modified -- the event is only emitted when the "events"
> migration capability is turned on (default is off). Thus a new patch had to
> be added to libvirt. I'm resending all migration event patches for context
> even though they haven't changed at all since they were ACKed.
> 
> Jiri Denemark (5):
>   qemu_monitor: Wire up MIGRATION event
>   qemu: Enable migration events on QMP monitor
>   qemuDomainGetJobStatsInternal: Support migration events
>   qemu: Update migration state according to MIGRATION event
>   qemu: Wait for migration events on domain condition
I did the s/virDomainObjSignal/virDomainObjBroadcast/ change required after
Pavel's series and pushed these patches. Thanks for the reviews.

Jirka