Allow the user to change/get the hypervisor's period and quota while the vm is
running. If we limit the hypervisor's bandwidth, there is no need to keep
limiting the vm's bandwidth as a whole, so the vm-level limit is lifted. If the
hypervisor's bandwidth is later changed back to unlimited, the vm's bandwidth
must be limited again.
---
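A rough usage sketch for reviewers (not part of the patch): how a client could
drive the new parameters through virDomainSetSchedulerParametersFlags(). The
connection URI, the domain name "demo" and the period/quota values below are
only placeholders.

#include <string.h>
#include <libvirt/libvirt.h>

int main(void)
{
    virConnectPtr conn = NULL;
    virDomainPtr dom = NULL;
    virTypedParameter params[2];
    int ret = 1;

    memset(params, 0, sizeof(params));

    /* enforcement period of the hypervisor thread, in microseconds */
    strncpy(params[0].field, VIR_DOMAIN_SCHEDULER_HYPERVISOR_PERIOD,
            VIR_TYPED_PARAM_FIELD_LENGTH - 1);
    params[0].type = VIR_TYPED_PARAM_ULLONG;
    params[0].value.ul = 100000;

    /* maximum runtime within one period; a negative value means unlimited */
    strncpy(params[1].field, VIR_DOMAIN_SCHEDULER_HYPERVISOR_QUOTA,
            VIR_TYPED_PARAM_FIELD_LENGTH - 1);
    params[1].type = VIR_TYPED_PARAM_LLONG;
    params[1].value.l = 50000;

    if (!(conn = virConnectOpen("qemu:///system")))
        goto cleanup;
    if (!(dom = virDomainLookupByName(conn, "demo")))
        goto cleanup;

    /* apply to the running domain only */
    if (virDomainSetSchedulerParametersFlags(dom, params, 2,
                                             VIR_DOMAIN_AFFECT_LIVE) < 0)
        goto cleanup;

    ret = 0;

cleanup:
    if (dom)
        virDomainFree(dom);
    if (conn)
        virConnectClose(conn);
    return ret;
}

Setting hypervisor_quota to a negative value removes the limit, at which point
the driver limits the vm cgroup again as described above.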
include/libvirt/libvirt.h.in | 16 +++++
src/qemu/qemu_driver.c | 147 +++++++++++++++++++++++++++++++++++++++++-
2 files changed, 162 insertions(+), 1 deletions(-)
diff --git a/include/libvirt/libvirt.h.in b/include/libvirt/libvirt.h.in
index 97ad99d..e015499 100644
--- a/include/libvirt/libvirt.h.in
+++ b/include/libvirt/libvirt.h.in
@@ -662,6 +662,22 @@ typedef virTypedParameter *virTypedParameterPtr;
#define VIR_DOMAIN_SCHEDULER_VCPU_QUOTA "vcpu_quota"
/**
+ * VIR_DOMAIN_SCHEDULER_HYPERVISOR_PERIOD:
+ *
+ * Macro represents the enforcement period for a quota, in microseconds,
+ * for the hypervisor threads only, when using the posix scheduler, as a ullong.
+ */
+#define VIR_DOMAIN_SCHEDULER_HYPERVISOR_PERIOD "hypervisor_period"
+
+/**
+ * VIR_DOMAIN_SCHEDULER_HYPERVISOR_QUOTA:
+ *
+ * Macro represents the maximum bandwidth to be used within a period by
+ * the hypervisor threads only, when using the posix scheduler, as an llong.
+ */
+#define VIR_DOMAIN_SCHEDULER_HYPERVISOR_QUOTA "hypervisor_quota"
+
+/**
* VIR_DOMAIN_SCHEDULER_WEIGHT:
*
* Macro represents the relative weight, when using the credit
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 2e40aee..165d1c0 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -5973,7 +5973,7 @@ static char *qemuGetSchedulerType(virDomainPtr dom,
else if (rc == 0)
*nparams = 1;
else
- *nparams = 3;
+ *nparams = 5;
}
ret = strdup("posix");
@@ -7124,6 +7124,54 @@ cleanup:
}
static int
+qemuSetHypervisorBWLive(virDomainObjPtr vm, virCgroupPtr cgroup,
+ unsigned long long period, long long quota)
+{
+ qemuDomainObjPrivatePtr priv = vm->privateData;
+ virCgroupPtr cgroup_hypervisor = NULL;
+ int rc;
+
+ if (period == 0 && quota == 0)
+ return 0;
+
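+ /* We do not create a sub dir for each vcpu, so there is nothing to tune */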
+ if (priv->nvcpupids == 0 || priv->vcpupids[0] == vm->pid) {
+ return 0;
+ }
+
+ rc = virCgroupForHypervisor(cgroup, &cgroup_hypervisor, 0);
+ if (rc < 0) {
+ virReportSystemError(-rc,
+ _("Unable to find hypervisor cgroup for %s"),
+ vm->def->name);
+ goto cleanup;
+ }
+
+ /* we are limiting the hypervisor thread, so unlimit the vm cgroup */
+ if (quota > 0 && qemuSetupCgroupVcpuBW(cgroup, 0, -1) < 0)
+ goto cleanup;
+
+ if (qemuSetupCgroupVcpuBW(cgroup_hypervisor, period, quota) < 0)
+ goto cleanup;
+
+ /*
+ * we have unlimited the hypervisor thread, so limit the vm cgroup again.
+ *
+ * We have already ensured that quota * vcpus cannot overflow when setting
+ * the vcpu quota, so we do not check it here.
+ */
+ if (quota < 0 &&
+ qemuSetupCgroupVcpuBW(cgroup, vm->def->cputune.period,
+ vm->def->cputune.quota * vm->def->vcpus) < 0)
+ goto cleanup;
+
+ virCgroupFree(&cgroup_hypervisor);
+ return 0;
+
+cleanup:
+ virCgroupFree(&cgroup_hypervisor);
+ return -1;
+}
+
+static int
qemuSetSchedulerParametersFlags(virDomainPtr dom,
virTypedParameterPtr params,
int nparams,
@@ -7146,6 +7194,10 @@ qemuSetSchedulerParametersFlags(virDomainPtr dom,
VIR_TYPED_PARAM_ULLONG,
VIR_DOMAIN_SCHEDULER_VCPU_QUOTA,
VIR_TYPED_PARAM_LLONG,
+ VIR_DOMAIN_SCHEDULER_HYPERVISOR_PERIOD,
+ VIR_TYPED_PARAM_ULLONG,
+ VIR_DOMAIN_SCHEDULER_HYPERVISOR_QUOTA,
+ VIR_TYPED_PARAM_LLONG,
NULL) < 0)
return -1;
@@ -7228,6 +7280,32 @@ qemuSetSchedulerParametersFlags(virDomainPtr dom,
if (flags & VIR_DOMAIN_AFFECT_CONFIG) {
vmdef->cputune.quota = params[i].value.l;
}
+ } else if (STREQ(param->field, VIR_DOMAIN_SCHEDULER_HYPERVISOR_PERIOD)) {
+ if (flags & VIR_DOMAIN_AFFECT_LIVE) {
+ rc = qemuSetHypervisorBWLive(vm, group, params[i].value.ul, 0);
+ if (rc != 0)
+ goto cleanup;
+
+ if (params[i].value.ul)
+ vm->def->cputune.hypervisor_period = params[i].value.ul;
+ }
+
+ if (flags & VIR_DOMAIN_AFFECT_CONFIG) {
+ vmdef->cputune.hypervisor_period = params[i].value.ul;
+ }
+ } else if (STREQ(param->field, VIR_DOMAIN_SCHEDULER_HYPERVISOR_QUOTA)) {
+ if (flags & VIR_DOMAIN_AFFECT_LIVE) {
+ rc = qemuSetHypervisorBWLive(vm, group, 0, params[i].value.l);
+ if (rc != 0)
+ goto cleanup;
+
+ if (params[i].value.l)
+ vm->def->cputune.hypervisor_quota = params[i].value.l;
+ }
+
+ if (flags & VIR_DOMAIN_AFFECT_CONFIG) {
+ vmdef->cputune.hypervisor_quota = params[i].value.l;
+ }
}
}
@@ -7332,6 +7410,43 @@ cleanup:
}
static int
+qemuGetHypervisorBWLive(virDomainObjPtr vm, virCgroupPtr cgroup,
+ unsigned long long *period, long long *quota)
+{
+ virCgroupPtr cgroup_hypervisor = NULL;
+ qemuDomainObjPrivatePtr priv = NULL;
+ int rc;
+ int ret = -1;
+
+ priv = vm->privateData;
+ if (priv->nvcpupids == 0 || priv->vcpupids[0] == vm->pid) {
+ /* We do not create a sub dir for each vcpu */
+ *period = 0;
+ *quota = 0;
+ return 0;
+ }
+
+ /* get period and quota for hypervisor */
+ rc = virCgroupForHypervisor(cgroup, &cgroup_hypervisor, 0);
+ if (rc < 0) {
+ virReportSystemError(-rc,
+ _("Unable to find hypervisor cgroup for %s"),
+ vm->def->name);
+ goto cleanup;
+ }
+
+ rc = qemuGetVcpuBWLive(cgroup_hypervisor, period, quota);
+ if (rc < 0)
+ goto cleanup;
+
+ ret = 0;
+
+cleanup:
+ virCgroupFree(&cgroup_hypervisor);
+ return ret;
+}
+
+static int
qemuGetSchedulerParametersFlags(virDomainPtr dom,
virTypedParameterPtr params,
int *nparams,
@@ -7343,6 +7458,8 @@ qemuGetSchedulerParametersFlags(virDomainPtr dom,
unsigned long long shares;
unsigned long long period;
long long quota;
+ unsigned long long hypervisor_period;
+ long long hypervisor_quota;
int ret = -1;
int rc;
bool cpu_bw_status = false;
@@ -7382,6 +7499,8 @@ qemuGetSchedulerParametersFlags(virDomainPtr dom,
if (*nparams > 1 && cpu_bw_status) {
period = persistentDef->cputune.period;
quota = persistentDef->cputune.quota;
+ hypervisor_period = persistentDef->cputune.hypervisor_period;
+ hypervisor_quota = persistentDef->cputune.hypervisor_quota;
}
goto out;
}
@@ -7410,6 +7529,14 @@ qemuGetSchedulerParametersFlags(virDomainPtr dom,
if (rc != 0)
goto cleanup;
}
+
+ if (*nparams > 3 && cpu_bw_status) {
+ rc = qemuGetHypervisorBWLive(vm, group, &hypervisor_period,
+ &hypervisor_quota);
+ if (rc != 0)
+ goto cleanup;
+ }
+
out:
if (virTypedParameterAssign(&params[0], VIR_DOMAIN_SCHEDULER_CPU_SHARES,
VIR_TYPED_PARAM_ULLONG, shares) < 0)
@@ -7432,6 +7559,24 @@ out:
goto cleanup;
saved_nparams++;
}
+
+ if (*nparams > saved_nparams) {
+ if (virTypedParameterAssign(&params[3],
+ VIR_DOMAIN_SCHEDULER_HYPERVISOR_PERIOD,
+ VIR_TYPED_PARAM_ULLONG,
+ hypervisor_period) < 0)
+ goto cleanup;
+ saved_nparams++;
+ }
+
+ if (*nparams > saved_nparams) {
+ if (virTypedParameterAssign(&params[4],
+ VIR_DOMAIN_SCHEDULER_HYPERVISOR_QUOTA,
+ VIR_TYPED_PARAM_LLONG,
+ hypervisor_quota) < 0)
+ goto cleanup;
+ saved_nparams++;
+ }
}
*nparams = saved_nparams;
--
1.7.1