The design of the stats fields returned for the VIR_DOMAIN_STATS_IOTHREAD
domain statistics group deviates from the established pattern: it's
impossible to infer which values of <id> the iothread.<id>.* fields will
use, because they have no connection to the iothread.count field.

Introduce iothread.ids, which reports a comma-separated list of the <id>s
used by the subsequent fields, in the order in which they are reported.
virsh domstats upstream --iothread
Domain: 'upstream'
  iothread.count=2
  iothread.ids=7,5
  iothread.7.poll-max-ns=32768
  iothread.7.poll-grow=0
  iothread.7.poll-shrink=0
  iothread.5.poll-max-ns=32768
  iothread.5.poll-grow=0
  iothread.5.poll-shrink=0
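
Illustrative only, not part of this change: a minimal client-side sketch of
how a management application could consume the new iothread.ids string to
build the per-iothread parameter names instead of guessing the <id>s. The
connection URI and the domain name 'upstream' are assumptions matching the
example above, and error handling is kept to a bare minimum.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <libvirt/libvirt.h>

int main(void)
{
    virConnectPtr conn = virConnectOpen("qemu:///system");
    virDomainPtr dom = NULL;
    virDomainStatsRecordPtr *records = NULL;
    const char *ids = NULL;
    char *idscopy = NULL;
    char *tok;
    char *saveptr = NULL;

    if (!conn)
        return 1;

    if (!(dom = virDomainLookupByName(conn, "upstream")))
        return 1;

    virDomainPtr doms[] = { dom, NULL };

    /* fetch only the VIR_DOMAIN_STATS_IOTHREAD group for this one domain */
    if (virDomainListGetStats(doms, VIR_DOMAIN_STATS_IOTHREAD, &records, 0) < 1)
        return 1;

    /* "iothread.ids" tells us exactly which <id>s appear in the record */
    if (virTypedParamsGetString(records[0]->params, records[0]->nparams,
                                "iothread.ids", &ids) != 1)
        return 1;

    idscopy = strdup(ids);

    for (tok = strtok_r(idscopy, ",", &saveptr); tok;
         tok = strtok_r(NULL, ",", &saveptr)) {
        char name[64];
        unsigned long long val;

        /* build the per-iothread field name from the advertised <id> */
        snprintf(name, sizeof(name), "iothread.%s.poll-max-ns", tok);
        if (virTypedParamsGetULLong(records[0]->params, records[0]->nparams,
                                    name, &val) == 1)
            printf("iothread %s: poll-max-ns=%llu\n", tok, val);
    }

    free(idscopy);
    virDomainStatsRecordListFree(records);
    virDomainFree(dom);
    virConnectClose(conn);
    return 0;
}

Compile with: gcc example.c $(pkg-config --cflags --libs libvirt)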
Signed-off-by: Peter Krempa <pkrempa@redhat.com>
---
 src/libvirt-domain.c   |  2 ++
 src/qemu/qemu_driver.c | 11 +++++++++++
 2 files changed, 13 insertions(+)

diff --git a/src/libvirt-domain.c b/src/libvirt-domain.c
index 87110036ca..e6d5697445 100644
--- a/src/libvirt-domain.c
+++ b/src/libvirt-domain.c
@@ -11620,6 +11620,8 @@ virConnectGetDomainCapabilities(virConnectPtr conn,
  *                        will use it's iothread_id value as the <id>. There
  *                        may be fewer <id> entries than the iothread.count
  *                        value if the polling values are not supported.
+ *     "iothread.ids" - a comma-separated list of the iothread <id>s reported
+ *                      in the subsequent entries, formatted as a string
  *     "iothread.<id>.poll-max-ns" - maximum polling time in ns as an unsigned
  *                                   long long. A 0 (zero) means polling is
  *                                   disabled.
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index b5300241a8..4ccc9d3d4e 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -21189,6 +21189,8 @@ qemuDomainGetStatsIOThread(virQEMUDriverPtr driver,
     qemuMonitorIOThreadInfoPtr *iothreads = NULL;
     int niothreads;
     int ret = -1;
+    g_auto(virBuffer) iothridbuf = VIR_BUFFER_INITIALIZER;
+    g_autofree char *iothridstr = NULL;
 
     if (!HAVE_JOB(privflags) || !virDomainObjIsActive(dom))
         return 0;
@@ -21205,6 +21207,15 @@ qemuDomainGetStatsIOThread(virQEMUDriverPtr driver,
     if (virTypedParamListAddUInt(params, niothreads, "iothread.count") < 0)
         goto cleanup;
 
+    for (i = 0; i < niothreads; i++)
+        virBufferAsprintf(&iothridbuf, "%u,", iothreads[i]->iothread_id);
+
+    virBufferTrim(&iothridbuf, ",", -1);
+    iothridstr = virBufferContentAndReset(&iothridbuf);
+
+    if (virTypedParamListAddString(params, iothridstr, "iothread.ids") < 0)
+        goto cleanup;
+
     for (i = 0; i < niothreads; i++) {
         if (iothreads[i]->poll_valid) {
             if (virTypedParamListAddULLong(params, iothreads[i]->poll_max_ns,
--
2.23.0