On 01/14/2016 11:27 AM, Peter Krempa wrote:
Due to bad design the vcpu sched element is orthogonal to the way the
data belongs to the corresponding objects. Now that vcpus are a struct
that can store other info too, let's convert the data to the saner
structure.
The helpers for the conversion are made universal so that they can be
reused for iothreads too.
This patch also resolves
https://bugzilla.redhat.com/show_bug.cgi?id=1235180
since with the correct storage approach you can't have dangling data.
---
src/conf/domain_conf.c | 231 +++++++++++++++++++++++++++++++++++++++---------
src/conf/domain_conf.h | 8 +-
src/qemu/qemu_driver.c | 6 +-
src/qemu/qemu_process.c | 8 +-
4 files changed, 202 insertions(+), 51 deletions(-)
As noted in my review of 10/34, for now I'm just pointing out something
Coverity found - I'll review the rest when I get to this later.
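(For anyone following along, here's my rough sketch of the new per-vcpu
storage this series moves to, reconstructed from the accessors used in
the hunk below - the type layout and field names are my illustration,
not copied from the series:)

    #include <stdbool.h>

    /* Scheduler policies as used by the formatter below; the exact
     * enumerator order is my assumption, only NONE being first matters. */
    typedef enum {
        VIR_PROC_POLICY_NONE = 0,
        VIR_PROC_POLICY_BATCH,
        VIR_PROC_POLICY_IDLE,
        VIR_PROC_POLICY_FIFO,
        VIR_PROC_POLICY_RR,
        VIR_PROC_POLICY_LAST
    } virProcessSchedPolicy;

    typedef struct _virDomainThreadSchedParam virDomainThreadSchedParam;
    typedef virDomainThreadSchedParam *virDomainThreadSchedParamPtr;
    struct _virDomainThreadSchedParam {
        virProcessSchedPolicy policy;
        int priority;               /* only meaningful for fifo/rr */
    };

    /* The point of the conversion: the sched data lives inside the
     * per-vcpu object itself, so removing a vcpu can no longer leave a
     * dangling <vcpusched> entry behind (this struct is hypothetical). */
    struct _virDomainVcpuInfo {
        bool online;
        virDomainThreadSchedParam sched;
    };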
diff --git a/src/conf/domain_conf.c b/src/conf/domain_conf.c
index b4f6fe9..e2dda9a 100644
[...]
static int
+virDomainFormatSchedDef(virDomainDefPtr def,
+ virBufferPtr buf,
+ const char *name,
+ virDomainThreadSchedParamPtr (*func)(virDomainDefPtr, unsigned int),
+ virBitmapPtr resourceMap)
+{
+ virBitmapPtr schedMap = NULL;
+ virBitmapPtr prioMap = NULL;
+ virDomainThreadSchedParamPtr sched;
+ char *tmp = NULL;
+ ssize_t next;
+ size_t i;
+ int ret = -1;
+
+ if (!(schedMap = virBitmapNew(VIR_DOMAIN_CPUMASK_LEN)) ||
+ !(prioMap = virBitmapNew(VIR_DOMAIN_CPUMASK_LEN)))
+ goto cleanup;
+
+ for (i = VIR_PROC_POLICY_NONE + 1; i < VIR_PROC_POLICY_LAST; i++) {
+ virBitmapClearAll(schedMap);
+
+ /* find vcpus using a particular scheduler */
+ next = -1;
+ while ((next = virBitmapNextSetBit(resourceMap, next)) > -1) {
+ sched = func(def, next);
+
+ if (sched->policy == i)
+ ignore_value(virBitmapSetBit(schedMap, next));
+ }
+
+ /* it's necessary to discriminate priority levels for schedulers that
+ * have them */
+ while (!virBitmapIsAllClear(schedMap)) {
+ virBitmapPtr currentMap = NULL;
+ ssize_t nextprio;
+ bool hasPriority = false;
+ int priority;
+
+ switch ((virProcessSchedPolicy) i) {
+ case VIR_PROC_POLICY_NONE:
+ case VIR_PROC_POLICY_BATCH:
+ case VIR_PROC_POLICY_IDLE:
+ case VIR_PROC_POLICY_LAST:
+ currentMap = schedMap;
+ break;
+
+ case VIR_PROC_POLICY_FIFO:
+ case VIR_PROC_POLICY_RR:
+ virBitmapClearAll(prioMap);
+ hasPriority = true;
+
+ /* we need to find a subset of vCPUs with the given scheduler
+ * that share the priority */
+ nextprio = virBitmapNextSetBit(schedMap, -1);
Coverity notes that virBitmapNextSetBit can return -1; however, [1]
+ sched = func(def, nextprio);
+ priority = sched->priority;
+
+ ignore_value(virBitmapSetBit(prioMap, nextprio));
[1] passing a -1 'nextprio' to virBitmapSetBit as 'size_t b' cannot
happen here, since the enclosing while loop only runs while schedMap
still has at least one bit set.
+
+ while ((nextprio = virBitmapNextSetBit(schedMap, nextprio)) > -1) {
+ sched = func(def, nextprio);
+ if (sched->priority == priority)
+ ignore_value(virBitmapSetBit(prioMap, nextprio));
+ }
+
+ currentMap = prioMap;
+ break;
+ }
+
+ /* now we have the complete group */
+ if (!(tmp = virBitmapFormat(currentMap)))
+ goto cleanup;
+
+ virBufferAsprintf(buf,
+ "<%ssched %ss='%s' scheduler='%s'",
+ name, name, tmp,
+ virProcessSchedPolicyTypeToString(i));
+ VIR_FREE(tmp);
+
+ if (hasPriority)
+ virBufferAsprintf(buf, " priority='%d'", priority);
+
+ virBufferAddLit(buf, "/>\n");
+
+ /* subtract all vCPUs that were already found */
+ virBitmapSubtract(schedMap, currentMap);
+ }
+ }
+
+ ret = 0;
+
+ cleanup:
+ virBitmapFree(schedMap);
+ virBitmapFree(prioMap);
+ return ret;
+}
+
+
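FWIW, tracing the virBufferAsprintf calls above: with name == "vcpu"
each group comes out as one element along these lines (vcpu numbers and
values made up by me):

    <vcpusched vcpus='0,2' scheduler='fifo' priority='1'/>
    <vcpusched vcpus='1' scheduler='batch'/>

i.e. vcpus sharing a scheduler (and, for fifo/rr, a priority) get folded
into a single element, which is what the schedMap/prioMap grouping and
the virBitmapSubtract at the end of the loop are for.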
[...]