From: Wen Congyang <wency@cn.fujitsu.com>
Allow the user to change/get the hypervisor's period and quota while the VM is running.
---
include/libvirt/libvirt.h.in | 16 +++++
src/qemu/qemu_driver.c | 133 +++++++++++++++++++++++++++++++++++++++++-
2 files changed, 148 insertions(+), 1 deletions(-)
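
Not part of the commit, only an illustration of how a management application
could drive the new parameters once this patch is applied; the connection URI,
the domain name "demo" and the chosen values are hypothetical, and error
handling is trimmed for brevity:

    #include <stdio.h>
    #include <string.h>
    #include <libvirt/libvirt.h>

    int main(void)
    {
        virConnectPtr conn = virConnectOpen("qemu:///system");
        virDomainPtr dom = virDomainLookupByName(conn, "demo");
        virTypedParameter params[2];

        memset(params, 0, sizeof(params));

        /* Enforcement period for the hypervisor threads: 100 ms */
        strncpy(params[0].field, VIR_DOMAIN_SCHEDULER_HYPERVISOR_PERIOD,
                VIR_TYPED_PARAM_FIELD_LENGTH);
        params[0].type = VIR_TYPED_PARAM_ULLONG;
        params[0].value.ul = 100000;

        /* Bandwidth quota within that period: 50 ms */
        strncpy(params[1].field, VIR_DOMAIN_SCHEDULER_HYPERVISOR_QUOTA,
                VIR_TYPED_PARAM_FIELD_LENGTH);
        params[1].type = VIR_TYPED_PARAM_LLONG;
        params[1].value.l = 50000;

        if (virDomainSetSchedulerParametersFlags(dom, params, 2,
                                                 VIR_DOMAIN_AFFECT_LIVE) < 0)
            fprintf(stderr, "failed to set hypervisor bandwidth\n");

        virDomainFree(dom);
        virConnectClose(conn);
        return 0;
    }

Since virsh enumerates whatever parameters the driver returns, "virsh
schedinfo <domain>" should also list hypervisor_period and hypervisor_quota
after this change.
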
diff --git a/include/libvirt/libvirt.h.in b/include/libvirt/libvirt.h.in
index 024c4ec..1a30426 100644
--- a/include/libvirt/libvirt.h.in
+++ b/include/libvirt/libvirt.h.in
@@ -662,6 +662,22 @@ typedef virTypedParameter *virTypedParameterPtr;
#define VIR_DOMAIN_SCHEDULER_VCPU_QUOTA "vcpu_quota"
/**
+ * VIR_DOMAIN_SCHEDULER_HYPERVISOR_PERIOD:
+ *
+ * Macro represents the enforcement period for a quota, in microseconds, for
+ * the hypervisor threads, when using the posix scheduler, as a ullong.
+ */
+#define VIR_DOMAIN_SCHEDULER_HYPERVISOR_PERIOD "hypervisor_period"
+
+/**
+ * VIR_DOMAIN_SCHEDULER_HYPERVISOR_QUOTA:
+ *
+ * Macro represents the maximum bandwidth to be used within a period by the
+ * hypervisor threads, when using the posix scheduler, as an llong.
+ */
+#define VIR_DOMAIN_SCHEDULER_HYPERVISOR_QUOTA "hypervisor_quota"
+
+/**
* VIR_DOMAIN_SCHEDULER_WEIGHT:
*
* Macro represents the relative weight, when using the credit
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index c880f05..493f9c6 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -6087,7 +6087,7 @@ static char *qemuGetSchedulerType(virDomainPtr dom,
else if (rc == 0)
*nparams = 1;
else
- *nparams = 3;
+ *nparams = 5;
}
ret = strdup("posix");
@@ -7157,6 +7157,40 @@ cleanup:
}
static int
+qemuSetHypervisorBWLive(virDomainObjPtr vm, virCgroupPtr cgroup,
+ unsigned long long period, long long quota)
+{
+ qemuDomainObjPrivatePtr priv = vm->privateData;
+ virCgroupPtr cgroup_hypervisor = NULL;
+ int rc;
+
+ if (period == 0 && quota == 0)
+ return 0;
+
+ if (priv->nvcpupids == 0 || priv->vcpupids[0] == vm->pid) {
+ return 0;
+ }
+
+ rc = virCgroupForHypervisor(cgroup, &cgroup_hypervisor, 0);
+ if (rc < 0) {
+ virReportSystemError(-rc,
+ _("Unable to find hypervisor cgroup for %s"),
+ vm->def->name);
+ goto cleanup;
+ }
+
+ if (qemuSetupCgroupCpuBandwidth(cgroup_hypervisor, period, quota) < 0)
+ goto cleanup;
+
+ virCgroupFree(&cgroup_hypervisor);
+ return 0;
+
+cleanup:
+ virCgroupFree(&cgroup_hypervisor);
+ return -1;
+}
+
+static int
qemuSetSchedulerParametersFlags(virDomainPtr dom,
virTypedParameterPtr params,
int nparams,
@@ -7179,6 +7213,10 @@ qemuSetSchedulerParametersFlags(virDomainPtr dom,
VIR_TYPED_PARAM_ULLONG,
VIR_DOMAIN_SCHEDULER_VCPU_QUOTA,
VIR_TYPED_PARAM_LLONG,
+ VIR_DOMAIN_SCHEDULER_HYPERVISOR_PERIOD,
+ VIR_TYPED_PARAM_ULLONG,
+ VIR_DOMAIN_SCHEDULER_HYPERVISOR_QUOTA,
+ VIR_TYPED_PARAM_LLONG,
NULL) < 0)
return -1;
@@ -7261,6 +7299,32 @@ qemuSetSchedulerParametersFlags(virDomainPtr dom,
if (flags & VIR_DOMAIN_AFFECT_CONFIG) {
vmdef->cputune.quota = params[i].value.l;
}
+ } else if (STREQ(param->field, VIR_DOMAIN_SCHEDULER_HYPERVISOR_PERIOD)) {
+ if (flags & VIR_DOMAIN_AFFECT_LIVE) {
+ rc = qemuSetHypervisorBWLive(vm, group, params[i].value.ul, 0);
+ if (rc != 0)
+ goto cleanup;
+
+ if (params[i].value.ul)
+ vm->def->cputune.hypervisor_period = params[i].value.ul;
+ }
+
+ if (flags & VIR_DOMAIN_AFFECT_CONFIG) {
+ vmdef->cputune.hypervisor_period = params[i].value.ul;
+ }
+ } else if (STREQ(param->field, VIR_DOMAIN_SCHEDULER_HYPERVISOR_QUOTA)) {
+ if (flags & VIR_DOMAIN_AFFECT_LIVE) {
+ rc = qemuSetHypervisorBWLive(vm, group, 0, params[i].value.l);
+ if (rc != 0)
+ goto cleanup;
+
+ if (params[i].value.l)
+ vm->def->cputune.hypervisor_quota = params[i].value.l;
+ }
+
+ if (flags & VIR_DOMAIN_AFFECT_CONFIG) {
+ vmdef->cputune.hypervisor_quota = params[i].value.l;
+ }
}
}
@@ -7365,6 +7429,43 @@ cleanup:
}
static int
+qemuGetHypervisorBWLive(virDomainObjPtr vm, virCgroupPtr cgroup,
+ unsigned long long *period, long long *quota)
+{
+ virCgroupPtr cgroup_hypervisor = NULL;
+ qemuDomainObjPrivatePtr priv = NULL;
+ int rc;
+ int ret = -1;
+
+ priv = vm->privateData;
+ if (priv->nvcpupids == 0 || priv->vcpupids[0] == vm->pid) {
+ /* The vcpus are not run in separate threads, so there is no hypervisor cgroup */
+ *period = 0;
+ *quota = 0;
+ return 0;
+ }
+
+ /* get period and quota for hypervisor */
+ rc = virCgroupForHypervisor(cgroup, &cgroup_hypervisor, 0);
+ if (rc < 0) {
+ virReportSystemError(-rc,
+ _("Unable to find hypervisor cgroup for %s"),
+ vm->def->name);
+ goto cleanup;
+ }
+
+ rc = qemuGetVcpuBWLive(cgroup_hypervisor, period, quota);
+ if (rc < 0)
+ goto cleanup;
+
+ ret = 0;
+
+cleanup:
+ virCgroupFree(&cgroup_hypervisor);
+ return ret;
+}
+
+static int
qemuGetSchedulerParametersFlags(virDomainPtr dom,
virTypedParameterPtr params,
int *nparams,
@@ -7376,6 +7477,8 @@ qemuGetSchedulerParametersFlags(virDomainPtr dom,
unsigned long long shares;
unsigned long long period;
long long quota;
+ unsigned long long hypervisor_period;
+ long long hypervisor_quota;
int ret = -1;
int rc;
bool cpu_bw_status = false;
@@ -7415,6 +7518,8 @@ qemuGetSchedulerParametersFlags(virDomainPtr dom,
if (*nparams > 1 && cpu_bw_status) {
period = persistentDef->cputune.period;
quota = persistentDef->cputune.quota;
+ hypervisor_period = persistentDef->cputune.hypervisor_period;
+ hypervisor_quota = persistentDef->cputune.hypervisor_quota;
}
goto out;
}
@@ -7443,6 +7548,14 @@ qemuGetSchedulerParametersFlags(virDomainPtr dom,
if (rc != 0)
goto cleanup;
}
+
+ if (*nparams > 3 && cpu_bw_status) {
+ rc = qemuGetHypervisorBWLive(vm, group, &hypervisor_period,
+ &hypervisor_quota);
+ if (rc != 0)
+ goto cleanup;
+ }
+
out:
if (virTypedParameterAssign(&params[0], VIR_DOMAIN_SCHEDULER_CPU_SHARES,
VIR_TYPED_PARAM_ULLONG, shares) < 0)
@@ -7465,6 +7578,24 @@ out:
goto cleanup;
saved_nparams++;
}
+
+ if (*nparams > saved_nparams) {
+ if (virTypedParameterAssign(&params[3],
+ VIR_DOMAIN_SCHEDULER_HYPERVISOR_PERIOD,
+ VIR_TYPED_PARAM_ULLONG,
+ hypervisor_period) < 0)
+ goto cleanup;
+ saved_nparams++;
+ }
+
+ if (*nparams > saved_nparams) {
+ if (virTypedParameterAssign(&params[4],
+ VIR_DOMAIN_SCHEDULER_HYPERVISOR_QUOTA,
+ VIR_TYPED_PARAM_LLONG,
+ hypervisor_quota) < 0)
+ goto cleanup;
+ saved_nparams++;
+ }
}
*nparams = saved_nparams;
--
1.7.4.4