From: Wen Congyang <wency(a)cn.fujitsu.com>
Allow the user to change/get the hypervisor's period and quota while the VM is running.
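
For illustration only (not part of the patch itself): a minimal client sketch of how
the new tunables could be applied to a running domain. The domain name "demo" and the
period/quota values are placeholders; the field names match the macros introduced below.

    #include <stdio.h>
    #include <string.h>
    #include <libvirt/libvirt.h>

    int main(void)
    {
        virConnectPtr conn = virConnectOpen("qemu:///system");
        virDomainPtr dom = NULL;
        virTypedParameter params[2];
        int ret = 1;

        if (!conn)
            return 1;
        if (!(dom = virDomainLookupByName(conn, "demo")))  /* placeholder name */
            goto cleanup;

        memset(params, 0, sizeof(params));

        /* enforcement period for hypervisor (non-vcpu) threads, in microseconds */
        strncpy(params[0].field, VIR_DOMAIN_SCHEDULER_HYPERVISOR_PERIOD,
                VIR_TYPED_PARAM_FIELD_LENGTH - 1);
        params[0].type = VIR_TYPED_PARAM_ULLONG;
        params[0].value.ul = 100000;

        /* maximum bandwidth within one period, in microseconds */
        strncpy(params[1].field, VIR_DOMAIN_SCHEDULER_HYPERVISOR_QUOTA,
                VIR_TYPED_PARAM_FIELD_LENGTH - 1);
        params[1].type = VIR_TYPED_PARAM_LLONG;
        params[1].value.l = 50000;

        if (virDomainSetSchedulerParametersFlags(dom, params, 2,
                                                 VIR_DOMAIN_AFFECT_LIVE) < 0) {
            fprintf(stderr, "failed to set hypervisor bandwidth\n");
            goto cleanup;
        }
        ret = 0;

    cleanup:
        if (dom)
            virDomainFree(dom);
        virConnectClose(conn);
        return ret;
    }

With the patch applied, the same values can be read back through
virDomainGetSchedulerParametersFlags(), where "hypervisor_period" and
"hypervisor_quota" are returned as the 4th and 5th parameters.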
---
include/libvirt/libvirt.h.in | 16 +++++
src/qemu/qemu_driver.c | 133 +++++++++++++++++++++++++++++++++++++++++-
2 files changed, 148 insertions(+), 1 deletion(-)
diff --git a/include/libvirt/libvirt.h.in b/include/libvirt/libvirt.h.in
index 15c08c1..dd34295 100644
--- a/include/libvirt/libvirt.h.in
+++ b/include/libvirt/libvirt.h.in
@@ -692,6 +692,22 @@ typedef virTypedParameter *virTypedParameterPtr;
#define VIR_DOMAIN_SCHEDULER_VCPU_QUOTA "vcpu_quota"
/**
+ * VIR_DOMAIN_SCHEDULER_HYPERVISOR_PERIOD:
+ *
+ * Macro represents the enforcement period for a quota, in microseconds,
+ * for hypervisor threads (not vcpus), when using the posix scheduler, as a ullong.
+ */
+#define VIR_DOMAIN_SCHEDULER_HYPERVISOR_PERIOD "hypervisor_period"
+
+/**
+ * VIR_DOMAIN_SCHEDULER_HYPERVISOR_QUOTA:
+ *
+ * Macro represents the maximum bandwidth to be used within one period by
+ * hypervisor threads (not vcpus), when using the posix scheduler, as an llong.
+ */
+#define VIR_DOMAIN_SCHEDULER_HYPERVISOR_QUOTA "hypervisor_quota"
+
+/**
* VIR_DOMAIN_SCHEDULER_WEIGHT:
*
* Macro represents the relative weight, when using the credit
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 7d1d093..e1274c2 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -6349,7 +6349,7 @@ static char *qemuGetSchedulerType(virDomainPtr dom,
else if (rc == 0)
*nparams = 1;
else
- *nparams = 3;
+ *nparams = 5;
}
ret = strdup("posix");
@@ -7420,6 +7420,40 @@ cleanup:
}
static int
+qemuSetHypervisorBWLive(virDomainObjPtr vm, virCgroupPtr cgroup,
+ unsigned long long period, long long quota)
+{
+ qemuDomainObjPrivatePtr priv = vm->privateData;
+ virCgroupPtr cgroup_hypervisor = NULL;
+ int rc;
+
+ if (period == 0 && quota == 0)
+ return 0;
+
+ /* The domain does not run vcpus in separate threads, so no
+ * hypervisor sub-cgroup exists and there is nothing to tune. */
+ if (priv->nvcpupids == 0 || priv->vcpupids[0] == vm->pid) {
+ return 0;
+ }
+
+ rc = virCgroupForHypervisor(cgroup, &cgroup_hypervisor, 0);
+ if (rc < 0) {
+ virReportSystemError(-rc,
+ _("Unable to find hypervisor cgroup for %s"),
+ vm->def->name);
+ goto cleanup;
+ }
+
+ if (qemuSetupCgroupVcpuBW(cgroup_hypervisor, period, quota) < 0)
+ goto cleanup;
+
+ virCgroupFree(&cgroup_hypervisor);
+ return 0;
+
+cleanup:
+ virCgroupFree(&cgroup_hypervisor);
+ return -1;
+}
+
+static int
qemuSetSchedulerParametersFlags(virDomainPtr dom,
virTypedParameterPtr params,
int nparams,
@@ -7442,6 +7476,10 @@ qemuSetSchedulerParametersFlags(virDomainPtr dom,
VIR_TYPED_PARAM_ULLONG,
VIR_DOMAIN_SCHEDULER_VCPU_QUOTA,
VIR_TYPED_PARAM_LLONG,
+ VIR_DOMAIN_SCHEDULER_HYPERVISOR_PERIOD,
+ VIR_TYPED_PARAM_ULLONG,
+ VIR_DOMAIN_SCHEDULER_HYPERVISOR_QUOTA,
+ VIR_TYPED_PARAM_LLONG,
NULL) < 0)
return -1;
@@ -7524,6 +7562,32 @@ qemuSetSchedulerParametersFlags(virDomainPtr dom,
if (flags & VIR_DOMAIN_AFFECT_CONFIG) {
vmdef->cputune.quota = params[i].value.l;
}
+ } else if (STREQ(param->field, VIR_DOMAIN_SCHEDULER_HYPERVISOR_PERIOD)) {
+ if (flags & VIR_DOMAIN_AFFECT_LIVE) {
+ rc = qemuSetHypervisorBWLive(vm, group, params[i].value.ul, 0);
+ if (rc != 0)
+ goto cleanup;
+
+ if (params[i].value.ul)
+ vm->def->cputune.hypervisor_period = params[i].value.ul;
+ }
+
+ if (flags & VIR_DOMAIN_AFFECT_CONFIG) {
+ vmdef->cputune.hypervisor_period = params[i].value.ul;
+ }
+ } else if (STREQ(param->field, VIR_DOMAIN_SCHEDULER_HYPERVISOR_QUOTA)) {
+ if (flags & VIR_DOMAIN_AFFECT_LIVE) {
+ rc = qemuSetHypervisorBWLive(vm, group, 0, params[i].value.l);
+ if (rc != 0)
+ goto cleanup;
+
+ if (params[i].value.l)
+ vm->def->cputune.hypervisor_quota = params[i].value.l;
+ }
+
+ if (flags & VIR_DOMAIN_AFFECT_CONFIG) {
+ vmdef->cputune.hypervisor_quota = params[i].value.l;
+ }
}
}
@@ -7628,6 +7692,43 @@ cleanup:
}
static int
+qemuGetHypervisorBWLive(virDomainObjPtr vm, virCgroupPtr cgroup,
+ unsigned long long *period, long long *quota)
+{
+ virCgroupPtr cgroup_hypervisor = NULL;
+ qemuDomainObjPrivatePtr priv = NULL;
+ int rc;
+ int ret = -1;
+
+ priv = vm->privateData;
+ if (priv->nvcpupids == 0 || priv->vcpupids[0] == vm->pid) {
+ /* We do not create a sub dir for each vcpu */
+ *period = 0;
+ *quota = 0;
+ return 0;
+ }
+
+ /* get period and quota for hypervisor */
+ rc = virCgroupForHypervisor(cgroup, &cgroup_hypervisor, 0);
+ if (rc < 0) {
+ virReportSystemError(-rc,
+ _("Unable to find hypervisor cgroup for %s"),
+ vm->def->name);
+ goto cleanup;
+ }
+
+ rc = qemuGetVcpuBWLive(cgroup_hypervisor, period, quota);
+ if (rc < 0)
+ goto cleanup;
+
+ ret = 0;
+
+cleanup:
+ virCgroupFree(&cgroup_hypervisor);
+ return ret;
+}
+
+static int
qemuGetSchedulerParametersFlags(virDomainPtr dom,
virTypedParameterPtr params,
int *nparams,
@@ -7639,6 +7740,8 @@ qemuGetSchedulerParametersFlags(virDomainPtr dom,
unsigned long long shares;
unsigned long long period;
long long quota;
+ unsigned long long hypervisor_period = 0;
+ long long hypervisor_quota = 0;
int ret = -1;
int rc;
bool cpu_bw_status = false;
@@ -7678,6 +7781,8 @@ qemuGetSchedulerParametersFlags(virDomainPtr dom,
if (*nparams > 1 && cpu_bw_status) {
period = persistentDef->cputune.period;
quota = persistentDef->cputune.quota;
+ hypervisor_period = persistentDef->cputune.hypervisor_period;
+ hypervisor_quota = persistentDef->cputune.hypervisor_quota;
}
goto out;
}
@@ -7706,6 +7811,14 @@ qemuGetSchedulerParametersFlags(virDomainPtr dom,
if (rc != 0)
goto cleanup;
}
+
+ if (*nparams > 3 && cpu_bw_status) {
+ rc = qemuGetHypervisorBWLive(vm, group, &hypervisor_period,
+ &hypervisor_quota);
+ if (rc != 0)
+ goto cleanup;
+ }
+
out:
if (virTypedParameterAssign(&params[0], VIR_DOMAIN_SCHEDULER_CPU_SHARES,
VIR_TYPED_PARAM_ULLONG, shares) < 0)
@@ -7728,6 +7841,24 @@ out:
goto cleanup;
saved_nparams++;
}
+
+ if (*nparams > saved_nparams) {
+ if (virTypedParameterAssign(&params[3],
+ VIR_DOMAIN_SCHEDULER_HYPERVISOR_PERIOD,
+ VIR_TYPED_PARAM_ULLONG,
+ hypervisor_period) < 0)
+ goto cleanup;
+ saved_nparams++;
+ }
+
+ if (*nparams > saved_nparams) {
+ if (virTypedParameterAssign(&params[4],
+ VIR_DOMAIN_SCHEDULER_HYPERVISOR_QUOTA,
+ VIR_TYPED_PARAM_LLONG,
+ hypervisor_quota) < 0)
+ goto cleanup;
+ saved_nparams++;
+ }
}
*nparams = saved_nparams;
--
1.7.10.2