When an asynchronous job is running and another API that is
incompatible with that job is called, we currently try to wait until
the job finishes and either run the API or fail with a timeout. I guess
a nicer solution is to just fail such an API immediately and let the
application retry once the asynchronous job ends.
---
src/qemu/THREADS.txt | 5 ++---
src/qemu/qemu_domain.c | 28 +++++++++++++++-------------
2 files changed, 17 insertions(+), 16 deletions(-)
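
Not part of the patch: a minimal caller-side sketch of how a management
application might cope with the new behaviour, assuming it uses the
public libvirt API. The helper name suspend_with_retry, the retry count
and the back-off interval are made up for illustration; the only thing
taken from the patch is that an incompatible job now fails immediately
with VIR_ERR_OPERATION_INVALID instead of blocking on the job condition.

#include <unistd.h>
#include <libvirt/libvirt.h>
#include <libvirt/virterror.h>

/* Hypothetical helper: retry an API that may collide with an
 * asynchronous job (e.g. migration) instead of blocking on it. */
static int
suspend_with_retry(virDomainPtr dom, int max_tries)
{
    int i;
    virErrorPtr err;

    for (i = 0; i < max_tries; i++) {
        if (virDomainSuspend(dom) == 0)
            return 0;

        err = virGetLastError();

        /* With this patch an incompatible asynchronous job makes the
         * call fail right away with VIR_ERR_OPERATION_INVALID; treat
         * any other error as fatal here. */
        if (!err || err->code != VIR_ERR_OPERATION_INVALID)
            return -1;

        sleep(1); /* arbitrary back-off before retrying */
    }
    return -1;
}
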
diff --git a/src/qemu/THREADS.txt b/src/qemu/THREADS.txt
index 3a27a85..9183f1f 100644
--- a/src/qemu/THREADS.txt
+++ b/src/qemu/THREADS.txt
@@ -69,8 +69,7 @@ There are a number of locks on various objects
specify what kind of action it is about to take and this is checked
against the allowed set of jobs in case an asynchronous job is
running. If the job is incompatible with current asynchronous job,
- it needs to wait until the asynchronous job ends and try to acquire
- the job again.
+ the operation fails.
Immediately after acquiring the virDomainObjPtr lock, any method
which intends to update state must acquire either asynchronous or
@@ -80,7 +79,7 @@ There are a number of locks on various objects
whenever it hits a piece of code which may sleep/wait, and
re-acquire it after the sleep/wait. Whenever an asynchronous job
wants to talk to the monitor, it needs to acquire nested job (a
- special kind of normla job) to obtain exclusive access to the
+ special kind of normal job) to obtain exclusive access to the
monitor.
Since the virDomainObjPtr lock was dropped while waiting for the
diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
index f9755a4..b2a36ad 100644
--- a/src/qemu/qemu_domain.c
+++ b/src/qemu/qemu_domain.c
@@ -723,21 +723,25 @@ qemuDomainObjBeginJobInternal(struct qemud_driver *driver,
if (driver_locked)
qemuDriverUnlock(driver);
-retry:
- while (!nested && !qemuDomainJobAllowed(priv, job)) {
-        if (virCondWaitUntil(&priv->job.asyncCond, &obj->lock, then) < 0)
- goto error;
- }
+ if (!nested && !qemuDomainJobAllowed(priv, job))
+ goto not_allowed;
while (priv->job.active) {
- if (virCondWaitUntil(&priv->job.cond, &obj->lock, then) < 0)
+ if (virCondWaitUntil(&priv->job.cond, &obj->lock, then) < 0) {
+ if (errno == ETIMEDOUT)
+ qemuReportError(VIR_ERR_OPERATION_TIMEOUT,
+ "%s", _("cannot acquire state change
lock"));
+ else
+ virReportSystemError(errno,
+ "%s", _("cannot acquire job
mutex"));
goto error;
+ }
}
/* No job is active but a new async job could have been started while obj
* was unlocked, so we need to recheck it. */
if (!nested && !qemuDomainJobAllowed(priv, job))
- goto retry;
+ goto not_allowed;
qemuDomainObjResetJob(priv);
@@ -759,13 +763,11 @@ retry:
return 0;
+not_allowed:
+ qemuReportError(VIR_ERR_OPERATION_INVALID,
+ "%s", _("incompatible job is running"));
+
error:
- if (errno == ETIMEDOUT)
- qemuReportError(VIR_ERR_OPERATION_TIMEOUT,
- "%s", _("cannot acquire state change
lock"));
- else
- virReportSystemError(errno,
- "%s", _("cannot acquire job mutex"));
if (driver_locked) {
virDomainObjUnlock(obj);
qemuDriverLock(driver);
--
1.7.6