From: Francesco Romani <fromani(a)redhat.com>
Future patches which will implement more bulk stats groups for QEMU will
need to access the connection object.
To accommodate that, a few changes are needed:
* enrich internal prototype to pass qemu driver object
* add per-group flag to mark if one collector needs monitor access or not
* If at least one collector of the requested stats needs monitor access
we must start a query job for each domain. The specific collectors
will run nested monitor jobs inside that.
* If the job can't be acquired, we pass flags down to the collectors so
  that specific collectors needing monitor access can be skipped, in order
  to gather as much data as possible.
Signed-off-by: Francesco Romani <fromani(a)redhat.com>
Signed-off-by: Peter Krempa <pkrempa(a)redhat.com>
---
Notes:
Version 6:
- renamed HAVE_MONITOR to HAVE_JOB
src/qemu/qemu_driver.c | 63 +++++++++++++++++++++++++++++++++++++++++++-------
1 file changed, 55 insertions(+), 8 deletions(-)
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 73edda3..8bf893e 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -17356,7 +17356,8 @@ qemuConnectGetDomainCapabilities(virConnectPtr conn,
static int
-qemuDomainGetStatsState(virDomainObjPtr dom,
+qemuDomainGetStatsState(virQEMUDriverPtr driver ATTRIBUTE_UNUSED,
+ virDomainObjPtr dom,
virDomainStatsRecordPtr record,
int *maxparams,
unsigned int privflags ATTRIBUTE_UNUSED)
@@ -17379,8 +17380,18 @@ qemuDomainGetStatsState(virDomainObjPtr dom,
}
+typedef enum {
+ QEMU_DOMAIN_STATS_HAVE_JOB = (1 << 0), /* job is entered, monitor can be
+ accessed */
+} qemuDomainStatsFlags;
+
+
+#define HAVE_JOB(flags) ((flags) & QEMU_DOMAIN_STATS_HAVE_JOB)
+
+
typedef int
-(*qemuDomainGetStatsFunc)(virDomainObjPtr dom,
+(*qemuDomainGetStatsFunc)(virQEMUDriverPtr driver,
+ virDomainObjPtr dom,
virDomainStatsRecordPtr record,
int *maxparams,
unsigned int flags);
@@ -17388,11 +17399,12 @@ typedef int
struct qemuDomainGetStatsWorker {
qemuDomainGetStatsFunc func;
unsigned int stats;
+ bool monitor;
};
static struct qemuDomainGetStatsWorker qemuDomainGetStatsWorkers[] = {
- { qemuDomainGetStatsState, VIR_DOMAIN_STATS_STATE},
- { NULL, 0 }
+ { qemuDomainGetStatsState, VIR_DOMAIN_STATS_STATE, false },
+ { NULL, 0, false }
};
@@ -17424,6 +17436,20 @@ qemuDomainGetStatsCheckSupport(unsigned int *stats,
}
+static bool
+qemuDomainGetStatsNeedMonitor(unsigned int stats)
+{
+ size_t i;
+
+ for (i = 0; qemuDomainGetStatsWorkers[i].func; i++)
+ if (stats & qemuDomainGetStatsWorkers[i].stats)
+ if (qemuDomainGetStatsWorkers[i].monitor)
+ return true;
+
+ return false;
+}
+
+
static int
qemuDomainGetStats(virConnectPtr conn,
virDomainObjPtr dom,
@@ -17441,8 +17467,8 @@ qemuDomainGetStats(virConnectPtr conn,
for (i = 0; qemuDomainGetStatsWorkers[i].func; i++) {
if (stats & qemuDomainGetStatsWorkers[i].stats) {
- if (qemuDomainGetStatsWorkers[i].func(dom, tmp, &maxparams,
- flags) < 0)
+ if (qemuDomainGetStatsWorkers[i].func(conn->privateData, dom, tmp,
+ &maxparams, flags) < 0)
goto cleanup;
}
}
@@ -17481,6 +17507,8 @@ qemuConnectGetAllDomainStats(virConnectPtr conn,
int nstats = 0;
size_t i;
int ret = -1;
+ unsigned int privflags = 0;
+ unsigned int domflags = 0;
if (ndoms)
virCheckFlags(VIR_CONNECT_GET_ALL_DOMAINS_STATS_ENFORCE_STATS, -1);
@@ -17515,7 +17543,11 @@ qemuConnectGetAllDomainStats(virConnectPtr conn,
if (VIR_ALLOC_N(tmpstats, ndoms + 1) < 0)
goto cleanup;
+ if (qemuDomainGetStatsNeedMonitor(stats))
+ privflags |= QEMU_DOMAIN_STATS_HAVE_JOB;
+
for (i = 0; i < ndoms; i++) {
+ domflags = privflags;
virDomainStatsRecordPtr tmp = NULL;
if (!(dom = qemuDomObjFromDomain(doms[i])))
@@ -17525,12 +17557,22 @@ qemuConnectGetAllDomainStats(virConnectPtr conn,
!virConnectGetAllDomainStatsCheckACL(conn, dom->def))
continue;
- if (qemuDomainGetStats(conn, dom, stats, &tmp, flags) < 0)
- goto cleanup;
+ if (HAVE_JOB(domflags) &&
+ qemuDomainObjBeginJob(driver, dom, QEMU_JOB_QUERY) < 0)
+ /* As it was never requested. Gather as much as possible anyway. */
+ domflags &= ~QEMU_DOMAIN_STATS_HAVE_JOB;
+
+ if (qemuDomainGetStats(conn, dom, stats, &tmp, domflags) < 0)
+ goto endjob;
if (tmp)
tmpstats[nstats++] = tmp;
+ if (HAVE_JOB(domflags) && !qemuDomainObjEndJob(driver, dom)) {
+ dom = NULL;
+ continue;
+ }
+
virObjectUnlock(dom);
dom = NULL;
}
@@ -17540,6 +17582,11 @@ qemuConnectGetAllDomainStats(virConnectPtr conn,
ret = nstats;
+ endjob:
+ if (HAVE_JOB(domflags) && dom)
+ if (!qemuDomainObjEndJob(driver, dom))
+ dom = NULL;
+
cleanup:
if (dom)
virObjectUnlock(dom);
--
2.1.0