This patch implements the VIR_DOMAIN_STATS_VCPU
group of statistics.
To do so, this patch also extracts a helper to gather the
VCpu information.
Signed-off-by: Francesco Romani <fromani(a)redhat.com>
---
include/libvirt/libvirt.h.in | 1 +
src/libvirt.c | 13 +++
src/qemu/qemu_driver.c | 210 ++++++++++++++++++++++++++++++-------------
3 files changed, 160 insertions(+), 64 deletions(-)
diff --git a/include/libvirt/libvirt.h.in b/include/libvirt/libvirt.h.in
index 1e4e428..68573a0 100644
--- a/include/libvirt/libvirt.h.in
+++ b/include/libvirt/libvirt.h.in
@@ -2513,6 +2513,7 @@ typedef enum {
VIR_DOMAIN_STATS_STATE = (1 << 0), /* return domain state */
VIR_DOMAIN_STATS_CPU_TOTAL = (1 << 1), /* return domain CPU info */
VIR_DOMAIN_STATS_BALLOON = (1 << 2), /* return domain balloon info */
+ VIR_DOMAIN_STATS_VCPU = (1 << 3), /* return domain virtual CPU info */
} virDomainStatsTypes;
typedef enum {
diff --git a/src/libvirt.c b/src/libvirt.c
index f21eb39..0326847 100644
--- a/src/libvirt.c
+++ b/src/libvirt.c
@@ -21569,6 +21569,19 @@ virConnectGetDomainCapabilities(virConnectPtr conn,
* "balloon.maximum" - the maximum memory in kiB allowed
* as unsigned long long.
*
+ * VIR_DOMAIN_STATS_VCPU: Return virtual CPU statistics.
+ * Due to VCPU hotplug, the vcpu.<num>.* array could be sparse.
+ * The actual size of the array corresponds to "vcpu.current".
+ * The array size will never exceed "vcpu.maximum".
+ * The typed parameter keys are in this format:
+ * "vcpu.current" - current number of online virtual CPUs as unsigned int.
+ * "vcpu.maximum" - maximum number of online virtual CPUs as unsigned int.
+ * "vcpu.<num>.state" - state of the virtual CPU <num>, as int
+ * from virVcpuState enum.
+ * "vcpu.<num>.time" - virtual cpu time spent by virtual CPU <num>
+ * as unsigned long long.
+ * "vcpu.<num>.cpu" - physical CPU pinned to virtual CPU <num> as int.
+ *
* Using 0 for @stats returns all stats groups supported by the given
* hypervisor.
*
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 4f8ccac..6bcbfb5 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -1375,6 +1375,76 @@ qemuGetProcessInfo(unsigned long long *cpuTime, int *lastCpu, long *vm_rss,
}
+static int
+qemuDomainHelperGetVcpus(virDomainObjPtr vm, virVcpuInfoPtr info, int maxinfo,
+ unsigned char *cpumaps, int maplen)
+{
+ int v, maxcpu, hostcpus;
+ size_t i;
+ qemuDomainObjPrivatePtr priv = vm->privateData;
+
+ if ((hostcpus = nodeGetCPUCount()) < 0)
+ return -1;
+
+ maxcpu = maplen * 8;
+ if (maxcpu > hostcpus)
+ maxcpu = hostcpus;
+
+ /* Clamp to actual number of vcpus */
+ if (maxinfo > priv->nvcpupids)
+ maxinfo = priv->nvcpupids;
+
+ if (maxinfo >= 1) {
+ if (info != NULL) {
+ memset(info, 0, sizeof(*info) * maxinfo);
+ for (i = 0; i < maxinfo; i++) {
+ info[i].number = i;
+ info[i].state = VIR_VCPU_RUNNING;
+
+ if (priv->vcpupids != NULL &&
+ qemuGetProcessInfo(&(info[i].cpuTime),
+ &(info[i].cpu),
+ NULL,
+ vm->pid,
+ priv->vcpupids[i]) < 0) {
+ virReportSystemError(errno, "%s",
+ _("cannot get vCPU placement & pCPU time"));
+ return -1;
+ }
+ }
+ }
+
+ if (cpumaps != NULL) {
+ memset(cpumaps, 0, maplen * maxinfo);
+ if (priv->vcpupids != NULL) {
+ for (v = 0; v < maxinfo; v++) {
+ unsigned char *cpumap = VIR_GET_CPUMAP(cpumaps, maplen, v);
+ virBitmapPtr map = NULL;
+ unsigned char *tmpmap = NULL;
+ int tmpmapLen = 0;
+
+ if (virProcessGetAffinity(priv->vcpupids[v],
+ &map, maxcpu) < 0)
+ return -1;
+ virBitmapToData(map, &tmpmap, &tmpmapLen);
+ if (tmpmapLen > maplen)
+ tmpmapLen = maplen;
+ memcpy(cpumap, tmpmap, tmpmapLen);
+
+ VIR_FREE(tmpmap);
+ virBitmapFree(map);
+ }
+ } else {
+ virReportError(VIR_ERR_OPERATION_INVALID,
+ "%s", _("cpu affinity is not available"));
+ return -1;
+ }
+ }
+ }
+ return maxinfo;
+}
+
+
static virDomainPtr qemuDomainLookupByID(virConnectPtr conn,
int id)
{
@@ -4994,10 +5064,7 @@ qemuDomainGetVcpus(virDomainPtr dom,
int maplen)
{
virDomainObjPtr vm;
- size_t i;
- int v, maxcpu, hostcpus;
int ret = -1;
- qemuDomainObjPrivatePtr priv;
if (!(vm = qemuDomObjFromDomain(dom)))
goto cleanup;
@@ -5012,67 +5079,7 @@ qemuDomainGetVcpus(virDomainPtr dom,
goto cleanup;
}
- priv = vm->privateData;
-
- if ((hostcpus = nodeGetCPUCount()) < 0)
- goto cleanup;
-
- maxcpu = maplen * 8;
- if (maxcpu > hostcpus)
- maxcpu = hostcpus;
-
- /* Clamp to actual number of vcpus */
- if (maxinfo > priv->nvcpupids)
- maxinfo = priv->nvcpupids;
-
- if (maxinfo >= 1) {
- if (info != NULL) {
- memset(info, 0, sizeof(*info) * maxinfo);
- for (i = 0; i < maxinfo; i++) {
- info[i].number = i;
- info[i].state = VIR_VCPU_RUNNING;
-
- if (priv->vcpupids != NULL &&
- qemuGetProcessInfo(&(info[i].cpuTime),
- &(info[i].cpu),
- NULL,
- vm->pid,
- priv->vcpupids[i]) < 0) {
- virReportSystemError(errno, "%s",
- _("cannot get vCPU placement & pCPU time"));
- goto cleanup;
- }
- }
- }
-
- if (cpumaps != NULL) {
- memset(cpumaps, 0, maplen * maxinfo);
- if (priv->vcpupids != NULL) {
- for (v = 0; v < maxinfo; v++) {
- unsigned char *cpumap = VIR_GET_CPUMAP(cpumaps, maplen, v);
- virBitmapPtr map = NULL;
- unsigned char *tmpmap = NULL;
- int tmpmapLen = 0;
-
- if (virProcessGetAffinity(priv->vcpupids[v],
- &map, maxcpu) < 0)
- goto cleanup;
- virBitmapToData(map, &tmpmap, &tmpmapLen);
- if (tmpmapLen > maplen)
- tmpmapLen = maplen;
- memcpy(cpumap, tmpmap, tmpmapLen);
-
- VIR_FREE(tmpmap);
- virBitmapFree(map);
- }
- } else {
- virReportError(VIR_ERR_OPERATION_INVALID,
- "%s", _("cpu affinity is not available"));
- goto cleanup;
- }
- }
- }
- ret = maxinfo;
+ ret = qemuDomainHelperGetVcpus(vm, info, maxinfo, cpumaps, maplen);
cleanup:
if (vm)
@@ -17457,6 +17464,80 @@ qemuDomainGetStatsBalloon(virConnectPtr conn,
return 0;
}
+
+static int
+qemuDomainGetStatsVcpu(virConnectPtr conn ATTRIBUTE_UNUSED,
+ virDomainObjPtr dom,
+ virDomainStatsRecordPtr record,
+ int *maxparams,
+ unsigned int privflags ATTRIBUTE_UNUSED)
+{
+ size_t i;
+ int ret = -1;
+ char param_name[VIR_TYPED_PARAM_FIELD_LENGTH];
+ virVcpuInfoPtr cpuinfo = NULL;
+
+ if (virTypedParamsAddUInt(&record->params,
+ &record->nparams,
+ maxparams,
+ "vcpu.current",
+ (unsigned) dom->def->vcpus) < 0)
+ return -1;
+
+ if (virTypedParamsAddUInt(&record->params,
+ &record->nparams,
+ maxparams,
+ "vcpu.maximum",
+ (unsigned) dom->def->maxvcpus) < 0)
+ return -1;
+
+ if (VIR_ALLOC_N(cpuinfo, dom->def->vcpus) < 0)
+ return -1;
+
+ if ((ret = qemuDomainHelperGetVcpus(dom,
+ cpuinfo,
+ dom->def->vcpus,
+ NULL,
+ 0)) < 0)
+ goto cleanup;
+
+ for (i = 0; i < dom->def->vcpus; i++) {
+ snprintf(param_name, VIR_TYPED_PARAM_FIELD_LENGTH,
+ "vcpu.%u.state", cpuinfo[i].number);
+ if (virTypedParamsAddInt(&record->params,
+ &record->nparams,
+ maxparams,
+ param_name,
+ cpuinfo[i].state) < 0)
+ goto cleanup;
+
+ snprintf(param_name, VIR_TYPED_PARAM_FIELD_LENGTH,
+ "vcpu.%u.time", cpuinfo[i].number);
+ if (virTypedParamsAddULLong(&record->params,
+ &record->nparams,
+ maxparams,
+ param_name,
+ cpuinfo[i].cpuTime) < 0)
+ goto cleanup;
+
+ snprintf(param_name, VIR_TYPED_PARAM_FIELD_LENGTH,
+ "vcpu.%u.cpu", cpuinfo[i].number);
+ if (virTypedParamsAddInt(&record->params,
+ &record->nparams,
+ maxparams,
+ param_name,
+ cpuinfo[i].cpu) < 0)
+ goto cleanup;
+ }
+
+ ret = 0;
+
+ cleanup:
+ VIR_FREE(cpuinfo);
+ return ret;
+}
+
+
typedef int
(*qemuDomainGetStatsFunc)(virConnectPtr conn,
virDomainObjPtr dom,
@@ -17474,6 +17555,7 @@ static struct qemuDomainGetStatsWorker qemuDomainGetStatsWorkers[] = {
{ qemuDomainGetStatsState, VIR_DOMAIN_STATS_STATE, false },
{ qemuDomainGetStatsCpu, VIR_DOMAIN_STATS_CPU_TOTAL, false },
{ qemuDomainGetStatsBalloon, VIR_DOMAIN_STATS_BALLOON, true },
+ { qemuDomainGetStatsVcpu, VIR_DOMAIN_STATS_VCPU, false },
{ NULL, 0, false }
};
--
1.9.3