Currently, virCgroupGetPercpuStats is only used by the LXC driver,
filling out the CPUTIME stats. qemuDomainGetPercpuStats does this
and also fills out VCPUTIME stats.
Extend virCgroupGetPercpuStats to also report VCPUTIME stats if
nvcpupids is non-zero. In the LXC driver, we don't have cpupids.
In the QEMU driver, there is at least one cpupid for a running domain,
so the behavior shouldn't change for QEMU either.
Also rename getSumVcpuPercpuStats to virCgroupGetPercpuVcpuSum.
---
src/lxc/lxc_driver.c | 2 +-
src/qemu/qemu_driver.c | 163 +------------------------------------------------
src/util/vircgroup.c | 99 +++++++++++++++++++++++++++++-
src/util/vircgroup.h | 3 +-
tests/vircgrouptest.c | 2 +-
5 files changed, 102 insertions(+), 167 deletions(-)
diff --git a/src/lxc/lxc_driver.c b/src/lxc/lxc_driver.c
index b900bc6..60f741f 100644
--- a/src/lxc/lxc_driver.c
+++ b/src/lxc/lxc_driver.c
@@ -5659,7 +5659,7 @@ lxcDomainGetCPUStats(virDomainPtr dom,
params, nparams);
else
ret = virCgroupGetPercpuStats(priv->cgroup, params,
- nparams, start_cpu, ncpus);
+ nparams, start_cpu, ncpus, 0);
cleanup:
if (vm)
virObjectUnlock(vm);
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index da976b3..68e2741 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -15977,165 +15977,6 @@ qemuDomainGetMetadata(virDomainPtr dom,
return ret;
}
-/* This function gets the sums of cpu time consumed by all vcpus.
- * For example, if there are 4 physical cpus, and 2 vcpus in a domain,
- * then for each vcpu, the cpuacct.usage_percpu looks like this:
- * t0 t1 t2 t3
- * and we have 2 groups of such data:
- * v\p 0 1 2 3
- * 0 t00 t01 t02 t03
- * 1 t10 t11 t12 t13
- * for each pcpu, the sum is cpu time consumed by all vcpus.
- * s0 = t00 + t10
- * s1 = t01 + t11
- * s2 = t02 + t12
- * s3 = t03 + t13
- */
-static int
-getSumVcpuPercpuStats(virCgroupPtr group,
- unsigned int nvcpupids,
- unsigned long long *sum_cpu_time,
- unsigned int num)
-{
- int ret = -1;
- size_t i;
- char *buf = NULL;
- virCgroupPtr group_vcpu = NULL;
-
- for (i = 0; i < nvcpupids; i++) {
- char *pos;
- unsigned long long tmp;
- size_t j;
-
- if (virCgroupNewVcpu(group, i, false, &group_vcpu) < 0)
- goto cleanup;
-
- if (virCgroupGetCpuacctPercpuUsage(group_vcpu, &buf) < 0)
- goto cleanup;
-
- pos = buf;
- for (j = 0; j < num; j++) {
- if (virStrToLong_ull(pos, &pos, 10, &tmp) < 0) {
- virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
- _("cpuacct parse error"));
- goto cleanup;
- }
- sum_cpu_time[j] += tmp;
- }
-
- virCgroupFree(&group_vcpu);
- VIR_FREE(buf);
- }
-
- ret = 0;
- cleanup:
- virCgroupFree(&group_vcpu);
- VIR_FREE(buf);
- return ret;
-}
-
-static int
-qemuDomainGetPercpuStats(virCgroupPtr group,
- virTypedParameterPtr params,
- unsigned int nparams,
- int start_cpu,
- unsigned int ncpus,
- unsigned int nvcpupids)
-{
- int rv = -1;
- size_t i;
- int id, max_id;
- char *pos;
- char *buf = NULL;
- unsigned long long *sum_cpu_time = NULL;
- unsigned long long *sum_cpu_pos;
- unsigned int n = 0;
- virTypedParameterPtr ent;
- int param_idx;
- unsigned long long cpu_time;
-
- /* return the number of supported params */
- if (nparams == 0 && ncpus != 0)
- return QEMU_NB_PER_CPU_STAT_PARAM;
-
- /* To parse account file, we need to know how many cpus are present. */
- max_id = nodeGetCPUCount();
- if (max_id < 0)
- return rv;
-
- if (ncpus == 0) { /* returns max cpu ID */
- rv = max_id;
- goto cleanup;
- }
-
- if (start_cpu > max_id) {
- virReportError(VIR_ERR_INVALID_ARG,
- _("start_cpu %d larger than maximum of %d"),
- start_cpu, max_id);
- goto cleanup;
- }
-
- /* we get percpu cputime accounting info. */
- if (virCgroupGetCpuacctPercpuUsage(group, &buf))
- goto cleanup;
- pos = buf;
-
- /* return percpu cputime in index 0 */
- param_idx = 0;
-
- /* number of cpus to compute */
- if (start_cpu >= max_id - ncpus)
- id = max_id - 1;
- else
- id = start_cpu + ncpus - 1;
-
- for (i = 0; i <= id; i++) {
- if (virStrToLong_ull(pos, &pos, 10, &cpu_time) < 0) {
- virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
- _("cpuacct parse error"));
- goto cleanup;
- } else {
- n++;
- }
- if (i < start_cpu)
- continue;
- ent = ¶ms[(i - start_cpu) * nparams + param_idx];
- if (virTypedParameterAssign(ent, VIR_DOMAIN_CPU_STATS_CPUTIME,
- VIR_TYPED_PARAM_ULLONG, cpu_time) < 0)
- goto cleanup;
- }
-
- /* return percpu vcputime in index 1 */
- if (++param_idx >= nparams) {
- rv = nparams;
- goto cleanup;
- }
-
- if (VIR_ALLOC_N(sum_cpu_time, n) < 0)
- goto cleanup;
- if (getSumVcpuPercpuStats(group, nvcpupids, sum_cpu_time, n) < 0)
- goto cleanup;
-
- sum_cpu_pos = sum_cpu_time;
- for (i = 0; i <= id; i++) {
- cpu_time = *(sum_cpu_pos++);
- if (i < start_cpu)
- continue;
- if (virTypedParameterAssign(¶ms[(i - start_cpu) * nparams +
- param_idx],
- VIR_DOMAIN_CPU_STATS_VCPUTIME,
- VIR_TYPED_PARAM_ULLONG,
- cpu_time) < 0)
- goto cleanup;
- }
-
- rv = param_idx + 1;
- cleanup:
- VIR_FREE(sum_cpu_time);
- VIR_FREE(buf);
- return rv;
-}
-
static int
qemuDomainGetCPUStats(virDomainPtr domain,
@@ -16177,8 +16018,8 @@ qemuDomainGetCPUStats(virDomainPtr domain,
ret = virCgroupGetDomainTotalCpuStats(priv->cgroup,
params, nparams);
else
- ret = qemuDomainGetPercpuStats(priv->cgroup, params, nparams,
- start_cpu, ncpus, priv->nvcpupids);
+ ret = virCgroupGetPercpuStats(priv->cgroup, params, nparams,
+ start_cpu, ncpus, priv->nvcpupids);
cleanup:
if (vm)
virObjectUnlock(vm);
diff --git a/src/util/vircgroup.c b/src/util/vircgroup.c
index 1ff3dad..7a7f52b 100644
--- a/src/util/vircgroup.c
+++ b/src/util/vircgroup.c
@@ -2832,25 +2832,91 @@ virCgroupDenyDevicePath(virCgroupPtr group, const char *path, int
perms)
}
+/* This function gets the sums of cpu time consumed by all vcpus.
+ * For example, if there are 4 physical cpus, and 2 vcpus in a domain,
+ * then for each vcpu, the cpuacct.usage_percpu looks like this:
+ * t0 t1 t2 t3
+ * and we have 2 groups of such data:
+ * v\p 0 1 2 3
+ * 0 t00 t01 t02 t03
+ * 1 t10 t11 t12 t13
+ * for each pcpu, the sum is cpu time consumed by all vcpus.
+ * s0 = t00 + t10
+ * s1 = t01 + t11
+ * s2 = t02 + t12
+ * s3 = t03 + t13
+ */
+static int
+virCgroupGetPercpuVcpuSum(virCgroupPtr group,
+ unsigned int nvcpupids,
+ unsigned long long *sum_cpu_time,
+ unsigned int num)
+{
+ int ret = -1;
+ size_t i;
+ char *buf = NULL;
+ virCgroupPtr group_vcpu = NULL;
+
+ for (i = 0; i < nvcpupids; i++) {
+ char *pos;
+ unsigned long long tmp;
+ size_t j;
+
+ if (virCgroupNewVcpu(group, i, false, &group_vcpu) < 0)
+ goto cleanup;
+
+ if (virCgroupGetCpuacctPercpuUsage(group_vcpu, &buf) < 0)
+ goto cleanup;
+
+ pos = buf;
+ for (j = 0; j < num; j++) {
+ if (virStrToLong_ull(pos, &pos, 10, &tmp) < 0) {
+ virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+ _("cpuacct parse error"));
+ goto cleanup;
+ }
+ sum_cpu_time[j] += tmp;
+ }
+
+ virCgroupFree(&group_vcpu);
+ VIR_FREE(buf);
+ }
+
+ ret = 0;
+ cleanup:
+ virCgroupFree(&group_vcpu);
+ VIR_FREE(buf);
+ return ret;
+}
+
+
int
virCgroupGetPercpuStats(virCgroupPtr group,
virTypedParameterPtr params,
unsigned int nparams,
int start_cpu,
- unsigned int ncpus)
+ unsigned int ncpus,
+ unsigned int nvcpupids)
{
int rv = -1;
size_t i;
int id, max_id;
char *pos;
char *buf = NULL;
+ unsigned long long *sum_cpu_time = NULL;
+ unsigned long long *sum_cpu_pos;
+ unsigned int n = 0;
virTypedParameterPtr ent;
int param_idx;
unsigned long long cpu_time;
/* return the number of supported params */
- if (nparams == 0 && ncpus != 0)
- return CGROUP_NB_PER_CPU_STAT_PARAM;
+ if (nparams == 0 && ncpus != 0) {
+ if (nvcpupids == 0)
+ return CGROUP_NB_PER_CPU_STAT_PARAM;
+ else
+ return CGROUP_NB_PER_CPU_STAT_PARAM + 1;
+ }
/* To parse account file, we need to know how many cpus are present. */
max_id = nodeGetCPUCount();
@@ -2888,6 +2954,8 @@ virCgroupGetPercpuStats(virCgroupPtr group,
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
_("cpuacct parse error"));
goto cleanup;
+ } else {
+ n++;
}
if (i < start_cpu)
continue;
@@ -2897,9 +2965,34 @@ virCgroupGetPercpuStats(virCgroupPtr group,
goto cleanup;
}
+ if (nvcpupids == 0 || param_idx + 1 >= nparams)
+ goto success;
+ /* return percpu vcputime in index 1 */
+ param_idx++;
+
+ if (VIR_ALLOC_N(sum_cpu_time, n) < 0)
+ goto cleanup;
+ if (virCgroupGetPercpuVcpuSum(group, nvcpupids, sum_cpu_time, n) < 0)
+ goto cleanup;
+
+ sum_cpu_pos = sum_cpu_time;
+ for (i = 0; i <= id; i++) {
+ cpu_time = *(sum_cpu_pos++);
+ if (i < start_cpu)
+ continue;
+ if (virTypedParameterAssign(¶ms[(i - start_cpu) * nparams +
+ param_idx],
+ VIR_DOMAIN_CPU_STATS_VCPUTIME,
+ VIR_TYPED_PARAM_ULLONG,
+ cpu_time) < 0)
+ goto cleanup;
+ }
+
+ success:
rv = param_idx + 1;
cleanup:
+ VIR_FREE(sum_cpu_time);
VIR_FREE(buf);
return rv;
}
diff --git a/src/util/vircgroup.h b/src/util/vircgroup.h
index ecac9a0..7bb46bf 100644
--- a/src/util/vircgroup.h
+++ b/src/util/vircgroup.h
@@ -206,7 +206,8 @@ virCgroupGetPercpuStats(virCgroupPtr group,
virTypedParameterPtr params,
unsigned int nparams,
int start_cpu,
- unsigned int ncpus);
+ unsigned int ncpus,
+ unsigned int nvcpupids);
int
virCgroupGetDomainTotalCpuStats(virCgroupPtr group,
diff --git a/tests/vircgrouptest.c b/tests/vircgrouptest.c
index dd078c1..35ac0c0 100644
--- a/tests/vircgrouptest.c
+++ b/tests/vircgrouptest.c
@@ -560,7 +560,7 @@ static int testCgroupGetPercpuStats(const void *args
ATTRIBUTE_UNUSED)
if ((rv = virCgroupGetPercpuStats(cgroup,
params,
- 2, 0, 1)) < 0) {
+ 2, 0, 1, 0)) < 0) {
fprintf(stderr, "Failed call to virCgroupGetPercpuStats for /virtualmachines
cgroup: %d\n", -rv);
goto cleanup;
}
--
1.8.3.2