On Thu, 2016-01-14 at 13:28 +0300, Alexander Burluka wrote:
Signed-off-by: Alexander Burluka <aburluka(a)virtuozzo.com>
---
src/qemu/qemu_driver.c | 120 ++++++++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 118 insertions(+), 2 deletions(-)
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 48aeab6..6a0fa9b 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -8906,7 +8906,7 @@ static char *qemuDomainGetSchedulerType(virDomainPtr dom,
/* Domain not running, thus no cgroups - return defaults */
if (!virDomainObjIsActive(vm)) {
if (nparams)
- *nparams = 5;
+ *nparams = 7;
ignore_value(VIR_STRDUP(ret, "posix"));
goto cleanup;
}
@@ -8919,7 +8919,7 @@ static char *qemuDomainGetSchedulerType(virDomainPtr dom,
if (nparams) {
if (virCgroupSupportsCpuBW(priv->cgroup))
- *nparams = 5;
+ *nparams = 7;
else
*nparams = 1;
}
@@ -10234,6 +10234,31 @@ qemuDomainGetNumaParameters(virDomainPtr dom,
}
static int
+qemuSetGlobalBWLive(virDomainObjPtr vm ATTRIBUTE_UNUSED, virCgroupPtr cgroup,
+ unsigned long long period, long long quota)
+{
+ virCgroupPtr cgroup_global = NULL;
+
+ if (period == 0 && quota == 0)
+ return 0;
+
+ if (virCgroupNewThread(cgroup, VIR_CGROUP_THREAD_GLOBAL, 0,
+ false, &cgroup_global) < 0)
+ goto cleanup;
The same thing as in the 7th patch - you could just use cgroup directly here instead of creating a separate cgroup_global.
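
Roughly what I have in mind (an untested sketch, assuming qemuSetupBandwidthCgroup
is fine with being handed the domain cgroup directly):

static int
qemuSetGlobalBWLive(virDomainObjPtr vm ATTRIBUTE_UNUSED, virCgroupPtr cgroup,
                    unsigned long long period, long long quota)
{
    if (period == 0 && quota == 0)
        return 0;

    /* The domain cgroup already is the "global" one, so write the
     * bandwidth values to it directly rather than creating a
     * VIR_CGROUP_THREAD_GLOBAL child for the same controller. */
    return qemuSetupBandwidthCgroup(cgroup, period, quota);
}
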
+
+ if (qemuSetupBandwidthCgroup(cgroup_global, period, quota) < 0)
+ goto cleanup;
+
+ virCgroupFree(&cgroup_global);
+
+ return 0;
+
+ cleanup:
+ virCgroupFree(&cgroup_global);
+ return -1;
+}
+
+static int
qemuSetVcpusBWLive(virDomainObjPtr vm, virCgroupPtr cgroup,
unsigned long long period, long long quota)
{
@@ -10329,6 +10354,10 @@ qemuDomainSetSchedulerParametersFlags(virDomainPtr dom,
VIR_TYPED_PARAM_ULLONG,
VIR_DOMAIN_SCHEDULER_VCPU_QUOTA,
VIR_TYPED_PARAM_LLONG,
+ VIR_DOMAIN_SCHEDULER_GLOBAL_PERIOD,
+ VIR_TYPED_PARAM_ULLONG,
+ VIR_DOMAIN_SCHEDULER_GLOBAL_QUOTA,
+ VIR_TYPED_PARAM_LLONG,
VIR_DOMAIN_SCHEDULER_EMULATOR_PERIOD,
VIR_TYPED_PARAM_ULLONG,
VIR_DOMAIN_SCHEDULER_EMULATOR_QUOTA,
@@ -10445,6 +10474,46 @@ qemuDomainSetSchedulerParametersFlags(virDomainPtr dom,
if (flags & VIR_DOMAIN_AFFECT_CONFIG)
vmdef->cputune.quota = value_l;
+ } else if (STREQ(param->field, VIR_DOMAIN_SCHEDULER_GLOBAL_PERIOD)) {
+ SCHED_RANGE_CHECK(value_ul, VIR_DOMAIN_SCHEDULER_GLOBAL_PERIOD,
+ QEMU_SCHED_MIN_PERIOD, QEMU_SCHED_MAX_PERIOD);
+
+ if (flags & VIR_DOMAIN_AFFECT_LIVE && value_ul) {
+ if ((rc = qemuSetGlobalBWLive(vm, priv->cgroup, value_ul, 0)))
+ goto endjob;
+
+ vm->def->cputune.global_period = value_ul;
+
+ if (virTypedParamsAddULLong(&eventParams, &eventNparams,
+ &eventMaxNparams,
+ VIR_DOMAIN_TUNABLE_CPU_GLOBAL_PERIOD,
+ value_ul) < 0)
+ goto endjob;
+ }
+
+ if (flags & VIR_DOMAIN_AFFECT_CONFIG)
+ vmdef->cputune.global_period = params[i].value.ul;
+
+ } else if (STREQ(param->field, VIR_DOMAIN_SCHEDULER_GLOBAL_QUOTA)) {
+ SCHED_RANGE_CHECK(value_l, VIR_DOMAIN_SCHEDULER_GLOBAL_QUOTA,
+ QEMU_SCHED_MIN_QUOTA, QEMU_SCHED_MAX_QUOTA);
+
+ if (flags & VIR_DOMAIN_AFFECT_LIVE && value_l) {
+ if ((rc = qemuSetGlobalBWLive(vm, priv->cgroup, 0, value_l)))
+ goto endjob;
+
+ vm->def->cputune.global_quota = value_l;
+
+ if (virTypedParamsAddLLong(&eventParams, &eventNparams,
+ &eventMaxNparams,
+ VIR_DOMAIN_TUNABLE_CPU_GLOBAL_QUOTA,
+ value_l) < 0)
+ goto endjob;
+ }
+
+ if (flags & VIR_DOMAIN_AFFECT_CONFIG)
+ vmdef->cputune.global_quota = value_l;
+
} else if (STREQ(param->field, VIR_DOMAIN_SCHEDULER_EMULATOR_PERIOD)) {
SCHED_RANGE_CHECK(value_ul, VIR_DOMAIN_SCHEDULER_EMULATOR_PERIOD,
QEMU_SCHED_MIN_PERIOD, QEMU_SCHED_MAX_PERIOD);
@@ -10609,6 +10678,27 @@ qemuGetEmulatorBandwidthLive(virCgroupPtr cgroup,
}
static int
+qemuGetGlobalBWLive(virCgroupPtr cgroup, unsigned long long *period,
+ long long *quota)
+{
+ virCgroupPtr global_cgroup = NULL;
+ int ret = -1;
+
+ if (virCgroupNewThread(cgroup, VIR_CGROUP_THREAD_GLOBAL, 0,
+ false, &global_cgroup) < 0)
+ goto cleanup;
+
+ if (qemuGetVcpuBWLive(global_cgroup, period, quota) < 0)
+ goto cleanup;
+
+ ret = 0;
+
+ cleanup:
+ virCgroupFree(&global_cgroup);
+ return ret;
+}
+
+static int
qemuDomainGetSchedulerParametersFlags(virDomainPtr dom,
virTypedParameterPtr params,
int *nparams,
@@ -10619,6 +10709,8 @@ qemuDomainGetSchedulerParametersFlags(virDomainPtr dom,
unsigned long long shares;
unsigned long long period;
long long quota;
+ unsigned long long global_period;
+ long long global_quota;
unsigned long long emulator_period;
long long emulator_quota;
int ret = -1;
@@ -10665,6 +10757,8 @@ qemuDomainGetSchedulerParametersFlags(virDomainPtr dom,
if (*nparams > 1) {
period = persistentDef->cputune.period;
quota = persistentDef->cputune.quota;
+ global_period = persistentDef->cputune.global_period;
+ global_quota = persistentDef->cputune.global_quota;
emulator_period = persistentDef->cputune.emulator_period;
emulator_quota = persistentDef->cputune.emulator_quota;
cpu_bw_status = true; /* Allow copy of data to params[] */
@@ -10694,6 +10788,12 @@ qemuDomainGetSchedulerParametersFlags(virDomainPtr dom,
goto cleanup;
}
+ if (*nparams > 5 && cpu_bw_status) {
+ rc = qemuGetGlobalBWLive(priv->cgroup, &global_period,
+ &global_quota);
+ if (rc != 0)
+ goto cleanup;
+ }
+
out:
if (virTypedParameterAssign(&params[0], VIR_DOMAIN_SCHEDULER_CPU_SHARES,
VIR_TYPED_PARAM_ULLONG, shares) < 0)
@@ -10734,6 +10834,22 @@ qemuDomainGetSchedulerParametersFlags(virDomainPtr dom,
goto cleanup;
saved_nparams++;
}
+
+ if (*nparams > saved_nparams) {
+ if (virTypedParameterAssign(&params[5],
+ VIR_DOMAIN_SCHEDULER_GLOBAL_PERIOD,
+ VIR_TYPED_PARAM_ULLONG, global_period) < 0)
+ goto cleanup;
+ saved_nparams++;
+ }
+
+ if (*nparams > saved_nparams) {
+ if (virTypedParameterAssign(&params[6],
+ VIR_DOMAIN_SCHEDULER_GLOBAL_QUOTA,
+ VIR_TYPED_PARAM_LLONG, global_quota) < 0)
+ goto cleanup;
+ saved_nparams++;
+ }
}
*nparams = saved_nparams;