This patch adds an optional BeginJob queue size limit. When the limit
is active, any further attempt to queue a job beyond it will fail. To
enable this feature, assign the desired value to the max_queued
variable in qemu.conf. Setting it to 0 disables the limit.
---
src/qemu/libvirtd_qemu.aug | 1 +
src/qemu/qemu.conf | 7 +++++++
src/qemu/qemu_conf.c | 4 ++++
src/qemu/qemu_conf.h | 2 ++
src/qemu/qemu_domain.c | 17 +++++++++++++++++
src/qemu/qemu_domain.h | 2 ++
6 files changed, 33 insertions(+), 0 deletions(-)
diff --git a/src/qemu/libvirtd_qemu.aug b/src/qemu/libvirtd_qemu.aug
index d018ac2..6c145c7 100644
--- a/src/qemu/libvirtd_qemu.aug
+++ b/src/qemu/libvirtd_qemu.aug
@@ -51,6 +51,7 @@ module Libvirtd_qemu =
| bool_entry "set_process_name"
| int_entry "max_processes"
| str_entry "lock_manager"
+ | int_entry "max_queued"
(* Each enty in the config is one of the following three ... *)
let entry = vnc_entry
diff --git a/src/qemu/qemu.conf b/src/qemu/qemu.conf
index 79c6e85..4da5d5a 100644
--- a/src/qemu/qemu.conf
+++ b/src/qemu/qemu.conf
@@ -309,3 +309,10 @@
# disk), uncomment this
#
# lock_manager = "sanlock"
+
+# Set the maximum number of APIs that may be queued on one domain.
+# Any API call beyond this threshold will fail to acquire the job
+# lock. Setting this to zero disables the limit.
+# Note that the job lock is per domain.
+#
+# max_queued = 0
diff --git a/src/qemu/qemu_conf.c b/src/qemu/qemu_conf.c
index 443e08d..d1bf075 100644
--- a/src/qemu/qemu_conf.c
+++ b/src/qemu/qemu_conf.c
@@ -458,6 +458,10 @@ int qemudLoadDriverConfig(struct qemud_driver *driver,
VIR_FREE(lockConf);
}
+ p = virConfGetValue(conf, "max_queued");
+ CHECK_TYPE("max_queued", VIR_CONF_LONG);
+ if (p) driver->max_queued = p->l;
+
virConfFree (conf);
return 0;
}
diff --git a/src/qemu/qemu_conf.h b/src/qemu/qemu_conf.h
index 0a60d32..098e94e 100644
--- a/src/qemu/qemu_conf.h
+++ b/src/qemu/qemu_conf.h
@@ -108,6 +108,8 @@ struct qemud_driver {
int maxProcesses;
+ int max_queued;
+
virCapsPtr caps;
virDomainEventStatePtr domainEventState;
diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
index 675c6df..982bad6 100644
--- a/src/qemu/qemu_domain.c
+++ b/src/qemu/qemu_domain.c
@@ -713,6 +713,8 @@ qemuDomainObjBeginJobInternal(struct qemud_driver *driver,
unsigned long long then;
bool nested = job == QEMU_JOB_ASYNC_NESTED;
+ priv->jobs_queued++;
+
if (virTimeMs(&now) < 0)
return -1;
then = now + QEMU_JOB_WAIT_TIME;
@@ -722,6 +724,11 @@ qemuDomainObjBeginJobInternal(struct qemud_driver *driver,
qemuDriverUnlock(driver);
retry:
+ if (driver->max_queued &&
+ priv->jobs_queued > driver->max_queued) {
+ goto error;
+ }
+
while (!nested && !qemuDomainJobAllowed(priv, job)) {
if (virCondWaitUntil(&priv->job.asyncCond, &obj->lock, then) < 0)
goto error;
@@ -761,9 +768,15 @@ error:
if (errno == ETIMEDOUT)
qemuReportError(VIR_ERR_OPERATION_TIMEOUT,
"%s", _("cannot acquire state change lock"));
+ else if (driver->max_queued &&
+ priv->jobs_queued > driver->max_queued)
+ qemuReportError(VIR_ERR_OPERATION_FAILED,
+ "%s", _("cannot acquire state change lock "
+ "due to max_queued limit"));
else
virReportSystemError(errno,
"%s", _("cannot acquire job mutex"));
+ priv->jobs_queued--;
if (driver_locked) {
virDomainObjUnlock(obj);
qemuDriverLock(driver);
@@ -844,6 +857,8 @@ int qemuDomainObjEndJob(struct qemud_driver *driver, virDomainObjPtr obj)
{
qemuDomainObjPrivatePtr priv = obj->privateData;
+ priv->jobs_queued--;
+
qemuDomainObjResetJob(priv);
qemuDomainObjSaveJob(driver, obj);
virCondSignal(&priv->job.cond);
@@ -856,6 +871,8 @@ qemuDomainObjEndAsyncJob(struct qemud_driver *driver, virDomainObjPtr obj)
{
qemuDomainObjPrivatePtr priv = obj->privateData;
+ priv->jobs_queued--;
+
qemuDomainObjResetAsyncJob(priv);
qemuDomainObjSaveJob(driver, obj);
virCondBroadcast(&priv->job.asyncCond);
diff --git a/src/qemu/qemu_domain.h b/src/qemu/qemu_domain.h
index e12ca8e..55875fe 100644
--- a/src/qemu/qemu_domain.h
+++ b/src/qemu/qemu_domain.h
@@ -113,6 +113,8 @@ struct _qemuDomainObjPrivate {
char *lockState;
bool fakeReboot;
+
+ int jobs_queued;
};
struct qemuDomainWatchdogEvent
--
1.7.3.4