[libvirt] [PATCH] Allow suspend during live migration

Currently no command can be sent to a qemu process while another job is
active. This patch adds support for signaling long-running jobs (such as
migration) so that other threads may request predefined operations to be
done during such jobs. Two signals are defined so far:
- QEMU_JOB_SIGNAL_CANCEL
- QEMU_JOB_SIGNAL_SUSPEND

The first one is used by qemuDomainAbortJob. The second one is used by
qemudDomainSuspend for suspending a domain during migration, which allows
for changing live migration into offline migration. However, there is a
small issue in the way qemudDomainSuspend is currently implemented for
migrating domains: the API call returns immediately after signaling the
migration job, which means it is asynchronous in this specific case.

Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
---
 src/qemu/qemu_driver.c |  149 ++++++++++++++++++++++++++++++++---------------
 1 files changed, 101 insertions(+), 48 deletions(-)

diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index f8ab545..5b2c26a 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -87,14 +87,26 @@
 
 #define VIR_FROM_THIS VIR_FROM_QEMU
 
+/* Only 1 job is allowed at any time
+ * A job includes *all* monitor commands, even those just querying
+ * information, not merely actions */
+enum qemuDomainJob {
+    QEMU_JOB_NONE = 0,      /* Always set to 0 for easy if (jobActive) conditions */
+    QEMU_JOB_UNSPECIFIED,
+    QEMU_JOB_MIGRATION,
+};
+
+enum qemuDomainJobSignals {
+    QEMU_JOB_SIGNAL_CANCEL  = 1 << 0, /* Request job cancellation */
+    QEMU_JOB_SIGNAL_SUSPEND = 1 << 1, /* Request VM suspend to finish live migration offline */
+};
+
 typedef struct _qemuDomainObjPrivate qemuDomainObjPrivate;
 typedef qemuDomainObjPrivate *qemuDomainObjPrivatePtr;
 struct _qemuDomainObjPrivate {
     virCond jobCond; /* Use in conjunction with main virDomainObjPtr lock */
-    unsigned int jobActive : 1; /* Non-zero if a job is active. Only 1 job is allowed at any time
-                                 * A job includes *all* monitor commands, even those just querying
-                                 * information, not merely actions */
-    unsigned int jobCancel : 1; /* Non-zero if a cancel request from client has arrived */
+    enum qemuDomainJob jobActive;   /* Currently running job */
+    unsigned int jobSignals;        /* Signals for running job */
     virDomainJobInfo jobInfo;
     unsigned long long jobStart;
 
@@ -338,8 +350,8 @@ static int qemuDomainObjBeginJob(virDomainObjPtr obj)
             return -1;
         }
     }
-    priv->jobActive = 1;
-    priv->jobCancel = 0;
+    priv->jobActive = QEMU_JOB_UNSPECIFIED;
+    priv->jobSignals = 0;
     priv->jobStart = (now.tv_sec * 1000ull) + (now.tv_usec / 1000);
     memset(&priv->jobInfo, 0, sizeof(priv->jobInfo));
 
@@ -385,8 +397,8 @@ static int qemuDomainObjBeginJobWithDriver(struct qemud_driver *driver,
             return -1;
         }
     }
-    priv->jobActive = 1;
-    priv->jobCancel = 0;
+    priv->jobActive = QEMU_JOB_UNSPECIFIED;
+    priv->jobSignals = 0;
     priv->jobStart = (now.tv_sec * 1000ull) + (now.tv_usec / 1000);
     memset(&priv->jobInfo, 0, sizeof(priv->jobInfo));
 
@@ -410,8 +422,8 @@ static int ATTRIBUTE_RETURN_CHECK qemuDomainObjEndJob(virDomainObjPtr obj)
 {
     qemuDomainObjPrivatePtr priv = obj->privateData;
 
-    priv->jobActive = 0;
-    priv->jobCancel = 0;
+    priv->jobActive = QEMU_JOB_NONE;
+    priv->jobSignals = 0;
     priv->jobStart = 0;
     memset(&priv->jobInfo, 0, sizeof(priv->jobInfo));
     virCondSignal(&priv->jobCond);
@@ -3560,6 +3572,7 @@ static int qemudDomainSuspend(virDomainPtr dom) {
     virDomainObjPtr vm;
     int ret = -1;
     virDomainEventPtr event = NULL;
+    qemuDomainObjPrivatePtr priv;
 
     qemuDriverLock(driver);
     vm = virDomainFindByUUID(&driver->domains, dom->uuid);
@@ -3571,30 +3584,48 @@ static int qemudDomainSuspend(virDomainPtr dom) {
                         _("no domain with matching uuid '%s'"), uuidstr);
         goto cleanup;
     }
-    if (qemuDomainObjBeginJobWithDriver(driver, vm) < 0)
-        goto cleanup;
-
     if (!virDomainObjIsActive(vm)) {
         qemuReportError(VIR_ERR_OPERATION_INVALID,
                         "%s", _("domain is not running"));
-        goto endjob;
+        goto cleanup;
     }
-    if (vm->state != VIR_DOMAIN_PAUSED) {
-        qemuDomainObjPrivatePtr priv = vm->privateData;
-        qemuDomainObjEnterMonitorWithDriver(driver, vm);
-        if (qemuMonitorStopCPUs(priv->mon) < 0) {
-            qemuDomainObjExitMonitorWithDriver(driver, vm);
+
+    priv = vm->privateData;
+
+    if (priv->jobActive == QEMU_JOB_MIGRATION) {
+        if (vm->state != VIR_DOMAIN_PAUSED) {
+            VIR_DEBUG("Requesting domain pause on %s",
+                      vm->def->name);
+            priv->jobSignals |= QEMU_JOB_SIGNAL_SUSPEND;
+        }
+        ret = 0;
+        goto cleanup;
+    } else {
+        if (qemuDomainObjBeginJobWithDriver(driver, vm) < 0)
+            goto cleanup;
+
+        if (!virDomainObjIsActive(vm)) {
+            qemuReportError(VIR_ERR_OPERATION_INVALID,
+                            "%s", _("domain is not running"));
             goto endjob;
         }
-        qemuDomainObjExitMonitorWithDriver(driver, vm);
-        vm->state = VIR_DOMAIN_PAUSED;
-        event = virDomainEventNewFromObj(vm,
-                                         VIR_DOMAIN_EVENT_SUSPENDED,
-                                         VIR_DOMAIN_EVENT_SUSPENDED_PAUSED);
+        if (vm->state != VIR_DOMAIN_PAUSED) {
+            int rc;
+
+            qemuDomainObjEnterMonitorWithDriver(driver, vm);
+            rc = qemuMonitorStopCPUs(priv->mon);
+            qemuDomainObjExitMonitorWithDriver(driver, vm);
+            if (rc < 0)
+                goto endjob;
+            vm->state = VIR_DOMAIN_PAUSED;
+            event = virDomainEventNewFromObj(vm,
+                                             VIR_DOMAIN_EVENT_SUSPENDED,
+                                             VIR_DOMAIN_EVENT_SUSPENDED_PAUSED);
+        }
+        if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0)
+            goto endjob;
+        ret = 0;
     }
-    if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0)
-        goto endjob;
-    ret = 0;
 
 endjob:
     if (qemuDomainObjEndJob(vm) == 0)
@@ -3946,6 +3977,35 @@ cleanup:
 }
 
 
+/** qemuDomainMigrateOffline:
+ * Pause domain for non-live migration.
+ */
+static int
+qemuDomainMigrateOffline(struct qemud_driver *driver,
+                         virDomainObjPtr vm)
+{
+    qemuDomainObjPrivatePtr priv = vm->privateData;
+    int ret;
+
+    qemuDomainObjEnterMonitorWithDriver(driver, vm);
+    ret = qemuMonitorStopCPUs(priv->mon);
+    qemuDomainObjExitMonitorWithDriver(driver, vm);
+
+    if (ret == 0) {
+        virDomainEventPtr event;
+
+        vm->state = VIR_DOMAIN_PAUSED;
+        event = virDomainEventNewFromObj(vm,
+                                         VIR_DOMAIN_EVENT_SUSPENDED,
+                                         VIR_DOMAIN_EVENT_SUSPENDED_MIGRATED);
+        if (event)
+            qemuDomainEventQueue(driver, event);
+    }
+
+    return ret;
+}
+
+
 static int
 qemuDomainWaitForMigrationComplete(struct qemud_driver *driver, virDomainObjPtr vm)
 {
@@ -3964,8 +4024,8 @@ qemuDomainWaitForMigrationComplete(struct qemud_driver *driver, virDomainObjPtr
         struct timeval now;
         int rc;
 
-        if (priv->jobCancel) {
-            priv->jobCancel = 0;
+        if (priv->jobSignals & QEMU_JOB_SIGNAL_CANCEL) {
+            priv->jobSignals ^= QEMU_JOB_SIGNAL_CANCEL;
             VIR_DEBUG0("Cancelling migration at client request");
             qemuDomainObjEnterMonitorWithDriver(driver, vm);
             rc = qemuMonitorMigrateCancel(priv->mon);
@@ -3973,6 +4033,11 @@ qemuDomainWaitForMigrationComplete(struct qemud_driver *driver, virDomainObjPtr
             if (rc < 0) {
                 VIR_WARN0("Unable to cancel migration");
             }
+        } else if (priv->jobSignals & QEMU_JOB_SIGNAL_SUSPEND) {
+            priv->jobSignals ^= QEMU_JOB_SIGNAL_SUSPEND;
+            VIR_DEBUG0("Pausing domain for non-live migration");
+            if (qemuDomainMigrateOffline(driver, vm) < 0)
+                VIR_WARN0("Unable to pause domain");
         }
 
         qemuDomainObjEnterMonitorWithDriver(driver, vm);
@@ -8979,7 +9044,7 @@ qemudDomainMigratePerform (virDomainPtr dom,
     virDomainObjPtr vm;
     virDomainEventPtr event = NULL;
     int ret = -1;
-    int paused = 0;
+    int resume = 0;
     qemuDomainObjPrivatePtr priv;
 
     qemuDriverLock(driver);
@@ -8995,6 +9060,7 @@ qemudDomainMigratePerform (virDomainPtr dom,
 
     if (qemuDomainObjBeginJobWithDriver(driver, vm) < 0)
        goto cleanup;
+    priv->jobActive = QEMU_JOB_MIGRATION;
 
     if (!virDomainObjIsActive(vm)) {
         qemuReportError(VIR_ERR_OPERATION_INVALID,
@@ -9005,23 +9071,10 @@ qemudDomainMigratePerform (virDomainPtr dom,
     memset(&priv->jobInfo, 0, sizeof(priv->jobInfo));
     priv->jobInfo.type = VIR_DOMAIN_JOB_UNBOUNDED;
 
+    resume = vm->state == VIR_DOMAIN_RUNNING;
     if (!(flags & VIR_MIGRATE_LIVE) && vm->state == VIR_DOMAIN_RUNNING) {
-        /* Pause domain for non-live migration */
-        qemuDomainObjEnterMonitorWithDriver(driver, vm);
-        if (qemuMonitorStopCPUs(priv->mon) < 0) {
-            qemuDomainObjExitMonitorWithDriver(driver, vm);
+        if (qemuDomainMigrateOffline(driver, vm) < 0)
             goto endjob;
-        }
-        qemuDomainObjExitMonitorWithDriver(driver, vm);
-        paused = 1;
-
-        vm->state = VIR_DOMAIN_PAUSED;
-        event = virDomainEventNewFromObj(vm,
-                                         VIR_DOMAIN_EVENT_SUSPENDED,
-                                         VIR_DOMAIN_EVENT_SUSPENDED_MIGRATED);
-        if (event)
-            qemuDomainEventQueue(driver, event);
-        event = NULL;
     }
 
     if ((flags & (VIR_MIGRATE_TUNNELLED | VIR_MIGRATE_PEER2PEER))) {
@@ -9035,7 +9088,7 @@ qemudDomainMigratePerform (virDomainPtr dom,
 
     /* Clean up the source domain. */
     qemudShutdownVMDaemon(driver, vm);
-    paused = 0;
+    resume = 0;
 
     event = virDomainEventNewFromObj(vm,
                                      VIR_DOMAIN_EVENT_STOPPED,
@@ -9049,7 +9102,7 @@ qemudDomainMigratePerform (virDomainPtr dom,
     ret = 0;
 
 endjob:
-    if (paused) {
+    if (resume && vm->state == VIR_DOMAIN_PAUSED) {
         /* we got here through some sort of failure; start the domain again */
         qemuDomainObjEnterMonitorWithDriver(driver, vm);
         if (qemuMonitorStartCPUs(priv->mon, dom->conn) < 0) {
@@ -9425,7 +9478,7 @@ static int qemuDomainAbortJob(virDomainPtr dom) {
     if (virDomainObjIsActive(vm)) {
         if (priv->jobActive) {
             VIR_DEBUG("Requesting cancellation of job on vm %s", vm->def->name);
-            priv->jobCancel = 1;
+            priv->jobSignals |= QEMU_JOB_SIGNAL_CANCEL;
         } else {
             qemuReportError(VIR_ERR_OPERATION_INVALID,
                             "%s", _("no job is active on the domain"));
-- 
1.7.0.2

On Mon, Mar 15, 2010 at 02:19:00PM +0100, Jiri Denemark wrote:
Currently no command can be sent to a qemu process while another job is active. This patch adds support for signaling long-running jobs (such as migration) so that other threads may request predefined operations to be done during such jobs. Two signals are defined so far:
- QEMU_JOB_SIGNAL_CANCEL
- QEMU_JOB_SIGNAL_SUSPEND
The first one is used by qemuDomainAbortJob.
The second one is used by qemudDomainSuspend for suspending a domain during migration, which allows for changing live migration into offline migration. However, there is a small issue in the way qemudDomainSuspend is currently implemented for migrating domains: the API call returns immediately after signaling the migration job, which means it is asynchronous in this specific case.
That is a fairly minor issue, and I'm not sure we really need to worry about it. If we did want to fix it, the way to do it would be to add another condition variable to qemuDomainObjPrivate, say virCond jobSignalCond. After qemuDomainSuspend sets the job signal, it would then wait on that condition variable, and qemuDomainWaitForMigrationComplete() would signal the condition variable once it had processed the request.
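For illustration, the two sides might look roughly like this (an uncompiled sketch, not part of the patch; jobSignalCond is the hypothetical new field, and the lock/cond usage just mirrors the existing jobCond handling in qemuDomainObjBeginJob, which waits with the virDomainObj lock held):

    /* hypothetical addition to struct _qemuDomainObjPrivate:
     *     virCond jobSignalCond;    initialised/destroyed alongside jobCond
     */

    /* in qemudDomainSuspend(), migration branch: request the pause, then
     * wait until the migration loop has acted on the signal */
        if (vm->state != VIR_DOMAIN_PAUSED) {
            priv->jobSignals |= QEMU_JOB_SIGNAL_SUSPEND;
            while (priv->jobSignals & QEMU_JOB_SIGNAL_SUSPEND) {
                /* vm->lock is already held here; virCondWait drops and
                 * reacquires it, just like the jobCond wait */
                if (virCondWait(&priv->jobSignalCond, &vm->lock) < 0)
                    goto cleanup;
            }
        }
        ret = 0;
        goto cleanup;

    /* in qemuDomainWaitForMigrationComplete(), once the signal has been
     * handled, wake up any waiting caller */
        } else if (priv->jobSignals & QEMU_JOB_SIGNAL_SUSPEND) {
            priv->jobSignals ^= QEMU_JOB_SIGNAL_SUSPEND;
            VIR_DEBUG0("Pausing domain for non-live migration");
            if (qemuDomainMigrateOffline(driver, vm) < 0)
                VIR_WARN0("Unable to pause domain");
            virCondBroadcast(&priv->jobSignalCond);
        }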
@@ -3560,6 +3572,7 @@ static int qemudDomainSuspend(virDomainPtr dom) {
     virDomainObjPtr vm;
     int ret = -1;
     virDomainEventPtr event = NULL;
+    qemuDomainObjPrivatePtr priv;
 
     qemuDriverLock(driver);
     vm = virDomainFindByUUID(&driver->domains, dom->uuid);
@@ -3571,30 +3584,48 @@ static int qemudDomainSuspend(virDomainPtr dom) {
                         _("no domain with matching uuid '%s'"), uuidstr);
         goto cleanup;
     }
-    if (qemuDomainObjBeginJobWithDriver(driver, vm) < 0)
-        goto cleanup;
-
     if (!virDomainObjIsActive(vm)) {
         qemuReportError(VIR_ERR_OPERATION_INVALID,
                         "%s", _("domain is not running"));
-        goto endjob;
+        goto cleanup;
     }
-    if (vm->state != VIR_DOMAIN_PAUSED) {
-        qemuDomainObjPrivatePtr priv = vm->privateData;
-        qemuDomainObjEnterMonitorWithDriver(driver, vm);
-        if (qemuMonitorStopCPUs(priv->mon) < 0) {
-            qemuDomainObjExitMonitorWithDriver(driver, vm);
+
+    priv = vm->privateData;
+
+    if (priv->jobActive == QEMU_JOB_MIGRATION) {
+        if (vm->state != VIR_DOMAIN_PAUSED) {
+            VIR_DEBUG("Requesting domain pause on %s",
+                      vm->def->name);
+            priv->jobSignals |= QEMU_JOB_SIGNAL_SUSPEND;
If we did the condition variable thing I mentioned, then you'd also need to check whether the signal was already set and raise an error, since you don't want multiple concurrent suspend calls racing on the condition variable.
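Concretely, that check could sit at the top of the migration branch, something like this (again just a hypothetical sketch, with a made-up error message):

    if (priv->jobActive == QEMU_JOB_MIGRATION) {
        if (priv->jobSignals & QEMU_JOB_SIGNAL_SUSPEND) {
            /* another thread already requested a suspend and is waiting
             * on jobSignalCond; refuse rather than racing with it */
            qemuReportError(VIR_ERR_OPERATION_INVALID, "%s",
                            _("a suspend request is already pending"));
            goto cleanup;
        }
        priv->jobSignals |= QEMU_JOB_SIGNAL_SUSPEND;
        /* then wait on jobSignalCond as sketched earlier */
    }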
+        }
+        ret = 0;
+        goto cleanup;
+    } else {
+        if (qemuDomainObjBeginJobWithDriver(driver, vm) < 0)
+            goto cleanup;
+
+        if (!virDomainObjIsActive(vm)) {
+            qemuReportError(VIR_ERR_OPERATION_INVALID,
+                            "%s", _("domain is not running"));
             goto endjob;
         }
-        qemuDomainObjExitMonitorWithDriver(driver, vm);
-        vm->state = VIR_DOMAIN_PAUSED;
-        event = virDomainEventNewFromObj(vm,
-                                         VIR_DOMAIN_EVENT_SUSPENDED,
-                                         VIR_DOMAIN_EVENT_SUSPENDED_PAUSED);
+        if (vm->state != VIR_DOMAIN_PAUSED) {
+            int rc;
+
+            qemuDomainObjEnterMonitorWithDriver(driver, vm);
+            rc = qemuMonitorStopCPUs(priv->mon);
+            qemuDomainObjExitMonitorWithDriver(driver, vm);
+            if (rc < 0)
+                goto endjob;
+            vm->state = VIR_DOMAIN_PAUSED;
+            event = virDomainEventNewFromObj(vm,
+                                             VIR_DOMAIN_EVENT_SUSPENDED,
+                                             VIR_DOMAIN_EVENT_SUSPENDED_PAUSED);
+        }
+        if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0)
+            goto endjob;
+        ret = 0;
     }
-    if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0)
-        goto endjob;
-    ret = 0;
 
 endjob:
     if (qemuDomainObjEndJob(vm) == 0)
ACK to this patch. If you want to solve the async problem with a condition variable, it can be an add-on patch later.

Regards,
Daniel

--
|: Red Hat, Engineering, London   -o-   http://people.redhat.com/berrange/ :|
|: http://libvirt.org -o- http://virt-manager.org -o- http://deltacloud.org :|
|: http://autobuild.org        -o-        http://search.cpan.org/~danberr/ :|
|: GnuPG: 7D3B9505  -o-  F3C9 553F A1DA 4AC2 5648 23C1 B3DF F742 7D3B 9505 :|