On 09/15/14 10:48, Francesco Romani wrote:
This patch implements the VIR_DOMAIN_STATS_VCPU
group of statistics.
To do so, this patch also extracts a helper to gather the
VCpu information.
Signed-off-by: Francesco Romani <fromani(a)redhat.com>
---
include/libvirt/libvirt.h.in | 1 +
src/libvirt.c | 12 +++
src/qemu/qemu_driver.c | 201 +++++++++++++++++++++++++++++--------------
3 files changed, 150 insertions(+), 64 deletions(-)
diff --git a/include/libvirt/libvirt.h.in b/include/libvirt/libvirt.h.in
index a5033ed..4b851a5 100644
--- a/include/libvirt/libvirt.h.in
+++ b/include/libvirt/libvirt.h.in
@@ -2515,6 +2515,7 @@ typedef enum {
VIR_DOMAIN_STATS_STATE = (1 << 0), /* return domain state */
VIR_DOMAIN_STATS_CPU_TOTAL = (1 << 1), /* return domain CPU info */
VIR_DOMAIN_STATS_BALLOON = (1 << 2), /* return domain balloon info */
+ VIR_DOMAIN_STATS_VCPU = (1 << 3), /* return domain virtual CPU info */
} virDomainStatsTypes;
typedef enum {
diff --git a/src/libvirt.c b/src/libvirt.c
index b3b71a0..1d91c99 100644
--- a/src/libvirt.c
+++ b/src/libvirt.c
@@ -21609,6 +21609,18 @@ virConnectGetDomainCapabilities(virConnectPtr conn,
* "balloon.maximum" - the maximum memory in kiB allowed
* as unsigned long long.
*
+ * VIR_DOMAIN_STATS_VCPU: Return virtual CPU statistics.
+ * Due to VCPU hotplug, the vcpu.<num>.* array could be sparse.
+ * The actual size of the array corresponds to "vcpu.current".
+ * The array size will never exceed "vcpu.maximum".
+ * The typed parameter keys are in this format:
+ * "vcpu.current" - current number of online virtual CPUs as unsigned int.
+ * "vcpu.maximum" - maximum number of online virtual CPUs as unsigned int.
+ * "vcpu.<num>.state" - state of the virtual CPU <num>, as int
+ * from virVcpuState enum.
+ * "vcpu.<num>.time" - virtual cpu time spent by virtual CPU <num>
+ * as unsigned long long.
+ *
* Using 0 for @stats returns all stats groups supported by the given
* hypervisor.
*
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 745b4f1..4a92f58 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -1380,6 +1380,76 @@ qemuGetProcessInfo(unsigned long long *cpuTime, int *lastCpu, long *vm_rss,
 }
+static int
+qemuDomainHelperGetVcpus(virDomainObjPtr vm, virVcpuInfoPtr info, int maxinfo,
+ unsigned char *cpumaps, int maplen)
+{
+ int maxcpu, hostcpus;
+ size_t i, v;
+ qemuDomainObjPrivatePtr priv = vm->privateData;
+
+ if ((hostcpus = nodeGetCPUCount()) < 0)
+ return -1;
+
+ maxcpu = maplen * 8;
+ if (maxcpu > hostcpus)
+ maxcpu = hostcpus;
+
+ /* Clamp to actual number of vcpus */
+ if (maxinfo > priv->nvcpupids)
+ maxinfo = priv->nvcpupids;
nvcpupids is 0 if the VM is offline, thus:
+
+ if (maxinfo >= 1) {
+ if (info != NULL) {
+ memset(info, 0, sizeof(*info) * maxinfo);
+ for (i = 0; i < maxinfo; i++) {
+ info[i].number = i;
You don't initialize the returned CPU numbers to sane values (they stay "0").
+ info[i].state = VIR_VCPU_RUNNING;
+
+ if (priv->vcpupids != NULL &&
+ qemuGetProcessInfo(&(info[i].cpuTime),
+ &(info[i].cpu),
+ NULL,
+ vm->pid,
+ priv->vcpupids[i]) < 0) {
+ virReportSystemError(errno, "%s",
+ _("cannot get vCPU placement & pCPU time"));
+ return -1;
+ }
+ }
+ }
+
+ if (cpumaps != NULL) {
+ memset(cpumaps, 0, maplen * maxinfo);
+ if (priv->vcpupids != NULL) {
+ for (v = 0; v < maxinfo; v++) {
+ unsigned char *cpumap = VIR_GET_CPUMAP(cpumaps, maplen, v);
+ virBitmapPtr map = NULL;
+ unsigned char *tmpmap = NULL;
+ int tmpmapLen = 0;
+
+ if (virProcessGetAffinity(priv->vcpupids[v],
+ &map, maxcpu) < 0)
+ return -1;
+ virBitmapToData(map, &tmpmap, &tmpmapLen);
+ if (tmpmapLen > maplen)
+ tmpmapLen = maplen;
+ memcpy(cpumap, tmpmap, tmpmapLen);
+
+ VIR_FREE(tmpmap);
+ virBitmapFree(map);
+ }
+ } else {
+ virReportError(VIR_ERR_OPERATION_INVALID,
+ "%s", _("cpu affinity is not available"));
+ return -1;
+ }
+ }
+ return maxinfo;
+}
+
+
static virDomainPtr qemuDomainLookupByID(virConnectPtr conn,
int id)
{
@@ -17465,6 +17472,71 @@ qemuDomainGetStatsBalloon(virQEMUDriverPtr driver ATTRIBUTE_UNUSED,
 return 0;
 }
+
+static int
+qemuDomainGetStatsVcpu(virQEMUDriverPtr driver ATTRIBUTE_UNUSED,
+ virDomainObjPtr dom,
+ virDomainStatsRecordPtr record,
+ int *maxparams,
+ unsigned int privflags ATTRIBUTE_UNUSED)
+{
+ size_t i;
+ int ret = -1;
+ char param_name[VIR_TYPED_PARAM_FIELD_LENGTH];
+ virVcpuInfoPtr cpuinfo = NULL;
+
+ if (virTypedParamsAddUInt(&record->params,
+ &record->nparams,
+ maxparams,
+ "vcpu.current",
+ (unsigned) dom->def->vcpus) < 0)
+ return -1;
+
+ if (virTypedParamsAddUInt(&record->params,
+ &record->nparams,
+ maxparams,
+ "vcpu.maximum",
+ (unsigned) dom->def->maxvcpus) < 0)
+ return -1;
+
+ if (VIR_ALLOC_N(cpuinfo, dom->def->vcpus) < 0)
+ return -1;
+
+ if (qemuDomainHelperGetVcpus(dom, cpuinfo, dom->def->vcpus,
+ NULL, 0) < 0) {
+ virResetLastError();
+ ret = 0; /* it's ok to be silent and go ahead */
+ goto cleanup;
+ }
+
+ for (i = 0; i < dom->def->vcpus; i++) {
+ snprintf(param_name, VIR_TYPED_PARAM_FIELD_LENGTH,
+ "vcpu.%u.state", cpuinfo[i].number);
... and this then tries to assign the "vcpu.0.state" parameter multiple
times when a VM has multiple CPUs and is offline. I'll add a check that
skips this section when the domain is offline.
+ if (virTypedParamsAddInt(&record->params,
+ &record->nparams,
+ maxparams,
+ param_name,
+ cpuinfo[i].state) < 0)
+ goto cleanup;
+
+ snprintf(param_name, VIR_TYPED_PARAM_FIELD_LENGTH,
+ "vcpu.%u.time", cpuinfo[i].number);
+ if (virTypedParamsAddULLong(&record->params,
+ &record->nparams,
+ maxparams,
+ param_name,
+ cpuinfo[i].cpuTime) < 0)
+ goto cleanup;
+ }
+
+ ret = 0;
+
+ cleanup:
+ VIR_FREE(cpuinfo);
+ return ret;
+}
+
+
typedef int
(*qemuDomainGetStatsFunc)(virQEMUDriverPtr driver,
virDomainObjPtr dom,
Peter