Future patches implementing more bulk stats groups
for QEMU will need access to the connection object.
To accommodate that, a few changes are needed:
* enrich the internal prototype to pass the connection object.
* add a per-group flag to mark whether a collector needs
  monitor access (see the standalone sketch after this list).
* if at least one collector of the requested stats needs
  monitor access, start a query job for each domain; the
  specific collectors will run nested monitor jobs inside it.
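
For illustration, here is a minimal standalone C sketch of the two
pieces above: a worker table whose entries carry a monitor flag, and a
scan that decides whether any requested group needs monitor access.
All names (statsWorker, needMonitor, getStateStats) are hypothetical
stand-ins, not libvirt's real types or functions:

/* workers.c: illustrative only; names are hypothetical stand-ins,
 * not libvirt's real types or functions. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef int (*statsFunc)(int domid);

static int getStateStats(int domid)
{
    printf("state stats for domain %d\n", domid);
    return 0;
}

struct statsWorker {
    statsFunc func;
    unsigned int stats;  /* stats-group bit this worker serves */
    bool monitor;        /* does the worker need monitor access? */
};

static struct statsWorker workers[] = {
    { getStateStats, 1 << 0, false },
    { NULL, 0, false }   /* sentinel terminates the scan */
};

/* Mirrors the shape of qemuDomainGetStatsNeedMonitor: true if any
 * worker selected by the requested bitmask needs the monitor. */
static bool needMonitor(unsigned int stats)
{
    size_t i;

    for (i = 0; workers[i].func; i++)
        if ((stats & workers[i].stats) && workers[i].monitor)
            return true;
    return false;
}

int main(void)
{
    unsigned int requested = 1 << 0;

    printf("monitor needed: %s\n", needMonitor(requested) ? "yes" : "no");
    return 0;
}

Built with any C99 compiler, this prints whether the requested bitmask
selects a monitor-dependent worker; the NULL sentinel entry ends the
scan just as the table in the patch does.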
Signed-off-by: Francesco Romani <fromani@redhat.com>
---
src/qemu/qemu_driver.c | 51 ++++++++++++++++++++++++++++++++++++++++++--------
1 file changed, 43 insertions(+), 8 deletions(-)
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index d724eeb..2950a4b 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -17314,7 +17314,8 @@ qemuConnectGetDomainCapabilities(virConnectPtr conn,
static int
-qemuDomainGetStatsState(virDomainObjPtr dom,
+qemuDomainGetStatsState(virConnectPtr conn ATTRIBUTE_UNUSED,
+ virDomainObjPtr dom,
virDomainStatsRecordPtr record,
int *maxparams,
unsigned int privflags ATTRIBUTE_UNUSED)
@@ -17338,7 +17339,8 @@ qemuDomainGetStatsState(virDomainObjPtr dom,
typedef int
-(*qemuDomainGetStatsFunc)(virDomainObjPtr dom,
+(*qemuDomainGetStatsFunc)(virConnectPtr conn,
+ virDomainObjPtr dom,
virDomainStatsRecordPtr record,
int *maxparams,
unsigned int flags);
@@ -17346,11 +17348,12 @@ typedef int
struct qemuDomainGetStatsWorker {
qemuDomainGetStatsFunc func;
unsigned int stats;
+ bool monitor;
};
static struct qemuDomainGetStatsWorker qemuDomainGetStatsWorkers[] = {
- { qemuDomainGetStatsState, VIR_DOMAIN_STATS_STATE},
- { NULL, 0 }
+ { qemuDomainGetStatsState, VIR_DOMAIN_STATS_STATE, false },
+ { NULL, 0, false }
};
@@ -17382,6 +17385,20 @@ qemuDomainGetStatsCheckSupport(unsigned int *stats,
}
+static bool
+qemuDomainGetStatsNeedMonitor(unsigned int stats)
+{
+ size_t i;
+
+ for (i = 0; qemuDomainGetStatsWorkers[i].func; i++)
+ if (stats & qemuDomainGetStatsWorkers[i].stats)
+ if (qemuDomainGetStatsWorkers[i].monitor)
+ return true;
+
+ return false;
+}
+
+
static int
qemuDomainGetStats(virConnectPtr conn,
virDomainObjPtr dom,
@@ -17399,7 +17416,7 @@ qemuDomainGetStats(virConnectPtr conn,
for (i = 0; qemuDomainGetStatsWorkers[i].func; i++) {
if (stats & qemuDomainGetStatsWorkers[i].stats) {
- if (qemuDomainGetStatsWorkers[i].func(dom, tmp, &maxparams,
+ if (qemuDomainGetStatsWorkers[i].func(conn, dom, tmp, &maxparams,
flags) < 0)
goto cleanup;
}
@@ -17435,6 +17452,7 @@ qemuConnectGetAllDomainStats(virConnectPtr conn,
virDomainObjPtr dom = NULL;
virDomainStatsRecordPtr *tmpstats = NULL;
bool enforce = !!(flags & VIR_CONNECT_GET_ALL_DOMAINS_STATS_ENFORCE_STATS);
+ bool needmon = false;
int ntempdoms;
int nstats = 0;
size_t i;
@@ -17473,6 +17491,8 @@ qemuConnectGetAllDomainStats(virConnectPtr conn,
if (VIR_ALLOC_N(tmpstats, ndoms + 1) < 0)
goto cleanup;
+ needmon = qemuDomainGetStatsNeedMonitor(stats);
+
for (i = 0; i < ndoms; i++) {
virDomainStatsRecordPtr tmp = NULL;
@@ -17483,11 +17503,21 @@ qemuConnectGetAllDomainStats(virConnectPtr conn,
!virConnectGetAllDomainStatsCheckACL(conn, dom->def))
continue;
- if (qemuDomainGetStats(conn, dom, stats, &tmp, flags) < 0)
+        if (needmon && qemuDomainObjBeginJob(driver, dom, QEMU_JOB_QUERY) < 0)
goto cleanup;
- if (tmp)
- tmpstats[nstats++] = tmp;
+ if ((needmon && virDomainObjIsActive(dom)) || !needmon) {
+ if (qemuDomainGetStats(conn, dom, stats, &tmp, flags) < 0)
+ goto endjob;
+
+ if (tmp)
+ tmpstats[nstats++] = tmp;
+ }
+
+ if (needmon && !qemuDomainObjEndJob(driver, dom)) {
+ dom = NULL;
+ goto cleanup;
+ }
virObjectUnlock(dom);
dom = NULL;
@@ -17498,6 +17528,11 @@ qemuConnectGetAllDomainStats(virConnectPtr conn,
ret = nstats;
+ endjob:
+ if (needmon && dom)
+ if (!qemuDomainObjEndJob(driver, dom))
+ dom = NULL;
+
cleanup:
if (dom)
virObjectUnlock(dom);
--
1.9.3
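
As a companion illustration, a standalone sketch of the per-domain job
wrapping this patch introduces in qemuConnectGetAllDomainStats: begin a
query job only when some collector needs the monitor, skip inactive
domains in that case, and always end the job afterwards. The helpers
(beginQueryJob, endJob, domainIsActive, collectStats) are hypothetical
stand-ins, not libvirt's real API, and the locking and error paths of
the real code are omitted:

/* jobloop.c: illustrative control flow only. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool beginQueryJob(int dom)  { printf("begin query job on %d\n", dom); return true; }
static void endJob(int dom)         { printf("end job on %d\n", dom); }
static bool domainIsActive(int dom) { return dom != 1; /* pretend domain 1 is shut off */ }
static void collectStats(int dom)   { printf("collect stats for %d\n", dom); }

int main(void)
{
    bool needmon = true;   /* at least one requested group needs the monitor */
    int doms[] = { 0, 1, 2 };
    size_t i;

    for (i = 0; i < sizeof(doms) / sizeof(doms[0]); i++) {
        if (needmon && !beginQueryJob(doms[i]))
            return 1;

        /* With a monitor requirement, only active domains are
         * queried; without one, every domain is queried. */
        if (!needmon || domainIsActive(doms[i]))
            collectStats(doms[i]);

        if (needmon)
            endJob(doms[i]);
    }
    return 0;
}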